From 53ace904b2b8e2444bc518b2498947c869c8c13b Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 5 Dec 2017 01:29:25 +0000 Subject: total WIP skeleton for /room_keys API --- synapse/handlers/e2e_room_keys.py | 60 ++++++++++ synapse/rest/client/v2_alpha/room_keys.py | 56 +++++++++ synapse/storage/e2e_room_keys.py | 133 ++++++++++++++++++++++ synapse/storage/schema/delta/46/e2e_room_keys.sql | 40 +++++++ 4 files changed, 289 insertions(+) create mode 100644 synapse/handlers/e2e_room_keys.py create mode 100644 synapse/rest/client/v2_alpha/room_keys.py create mode 100644 synapse/storage/e2e_room_keys.py create mode 100644 synapse/storage/schema/delta/46/e2e_room_keys.sql diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py new file mode 100644 index 0000000000..78c838a829 --- /dev/null +++ b/synapse/handlers/e2e_room_keys.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import ujson as json +import logging + +from canonicaljson import encode_canonical_json +from twisted.internet import defer + +from synapse.api.errors import SynapseError, CodeMessageException +from synapse.types import get_domain_from_id +from synapse.util.logcontext import preserve_fn, make_deferred_yieldable +from synapse.util.retryutils import NotRetryingDestination + +logger = logging.getLogger(__name__) + + +class E2eRoomKeysHandler(object): + def __init__(self, hs): + self.store = hs.get_datastore() + + @defer.inlineCallbacks + def get_room_keys(self, user_id, version, room_id, session_id): + results = yield self.store.get_e2e_room_keys(user_id, version, room_id, session_id) + defer.returnValue(results) + + @defer.inlineCallbacks + def upload_room_keys(self, user_id, version, room_keys): + + # TODO: Validate the JSON to make sure it has the right keys. + + # go through the room_keys + for room_id in room_keys['rooms']: + for session_id in room_keys['rooms'][room_id]['sessions']: + session = room_keys['rooms'][room_id]['sessions'][session_id] + + # get a lock + + # get the room_key for this particular row + yield self.store.get_e2e_room_key() + + # check whether we merge or not + if() + + # if so, we set it + yield self.store.set_e2e_room_key() + + # release the lock diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py new file mode 100644 index 0000000000..9b93001919 --- /dev/null +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from twisted.internet import defer + +from synapse.api.errors import SynapseError +from synapse.http.servlet import ( + RestServlet, parse_json_object_from_request, parse_integer +) +from synapse.http.servlet import parse_string +from synapse.types import StreamToken +from ._base import client_v2_patterns + +logger = logging.getLogger(__name__) + + +class RoomKeysUploadServlet(RestServlet): + PATTERNS = client_v2_patterns("/room_keys/keys(/(?P[^/]+))?(/(?P[^/]+))?$") + + def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ + super(RoomKeysUploadServlet, self).__init__() + self.auth = hs.get_auth() + self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() + + @defer.inlineCallbacks + def on_POST(self, request, room_id, session_id): + requester = yield self.auth.get_user_by_req(request, allow_guest=True) + user_id = requester.user.to_string() + body = parse_json_object_from_request(request) + + result = yield self.e2e_room_keys_handler.upload_room_keys( + user_id, version, body + ) + defer.returnValue((200, result)) + + +def register_servlets(hs, http_server): + RoomKeysUploadServlet(hs).register(http_server) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py new file mode 100644 index 0000000000..9f6d47e1b6 --- /dev/null +++ b/synapse/storage/e2e_room_keys.py @@ -0,0 +1,133 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
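+
+# The room_keys data stored here uses the same nested structure as the
+# /room_keys REST API, i.e. something like the following (the values shown
+# are purely illustrative):
+#
+# {
+#     "rooms": {
+#         "!abc:matrix.org": {
+#             "sessions": {
+#                 "c0ff33": {
+#                     "first_message_index": 1,
+#                     "forwarded_count": 0,
+#                     "is_verified": false,
+#                     "session_data": "SSBBTSBBIEZJU0gK"
+#                 }
+#             }
+#         }
+#     }
+# }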
+from twisted.internet import defer + +from synapse.util.caches.descriptors import cached + +from canonicaljson import encode_canonical_json +import ujson as json + +from ._base import SQLBaseStore + + +class EndToEndRoomKeyStore(SQLBaseStore): + + @defer.inlineCallbacks + def get_e2e_room_key(self, user_id, version, room_id, session_id): + + row = yield self._simple_select_one( + table="e2e_room_keys", + keyvalues={ + "user_id": user_id, + "version": version, + "room_id": room_id, + "session_id": session_id, + }, + retcols=( + "first_message_index", + "forwarded_count", + "is_verified", + "session_data", + ), + desc="get_e2e_room_key", + ) + + defer.returnValue(row); + + def set_e2e_room_key(self, user_id, version, room_id, session_id, room_key): + + def _set_e2e_room_key_txn(txn): + + self._simple_upsert( + txn, + table="e2e_room_keys", + keyvalues={ + "user_id": user_id, + "room_id": room_id, + "session_id": session_id, + } + values=[ + { + "version": version, + "first_message_index": room_key['first_message_index'], + "forwarded_count": room_key['forwarded_count'], + "is_verified": room_key['is_verified'], + "session_data": room_key['session_data'], + } + ], + lock=False, + ) + + return True + + return self.runInteraction( + "set_e2e_room_key", _set_e2e_room_key_txn + ) + + + def set_e2e_room_keys(self, user_id, version, room_keys): + + def _set_e2e_room_keys_txn(txn): + + self._simple_insert_many_txn( + txn, + table="e2e_room_keys", + values=[ + { + "user_id": user_id, + "room_id": room_id, + "session_id": session_id, + "version": version, + "first_message_index": room_keys['rooms'][room_id]['sessions'][session_id]['first_message_index'], + "forwarded_count": room_keys['rooms'][room_id]['sessions'][session_id]['forwarded_count'], + "is_verified": room_keys['rooms'][room_id]['sessions'][session_id]['is_verified'], + "session_data": room_keys['rooms'][room_id]['sessions'][session_id]['session_data'], + } + for session_id in room_keys['rooms'][room_id]['sessions'] + for room_id in room_keys['rooms'] + ] + ) + + return True + + return self.runInteraction( + "set_e2e_room_keys", _set_e2e_room_keys_txn + ) + + @defer.inlineCallbacks + def get_e2e_room_keys(self, user_id, version, room_id, session_id): + + keyvalues={ + "user_id": user_id, + "version": version, + } + if room_id: keyvalues['room_id'] = room_id + if session_id: keyvalues['session_id'] = session_id + + rows = yield self._simple_select_list( + table="e2e_room_keys", + keyvalues=keyvalues, + retcols=( + "first_message_index", + "forwarded_count", + "is_verified", + "session_data", + ), + desc="get_e2e_room_keys", + ) + + sessions = {} + sessions['rooms'][roomId]['sessions'][session_id] = row for row in rows; + defer.returnValue(sessions); diff --git a/synapse/storage/schema/delta/46/e2e_room_keys.sql b/synapse/storage/schema/delta/46/e2e_room_keys.sql new file mode 100644 index 0000000000..51b826e8b3 --- /dev/null +++ b/synapse/storage/schema/delta/46/e2e_room_keys.sql @@ -0,0 +1,40 @@ +/* Copyright 2017 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- users' optionally backed up encrypted e2e sessions +CREATE TABLE e2e_room_keys ( + user_id TEXT NOT NULL, + room_id TEXT NOT NULL, + session_id TEXT NOT NULL, + version INT NOT NULL, + first_message_index INT, + forwarded_count INT, + is_verified BOOLEAN, + session_data TEXT NOT NULL +); + +CREATE UNIQUE INDEX e2e_room_keys_user_idx ON e2e_room_keys(user_id); +CREATE UNIQUE INDEX e2e_room_keys_room_idx ON e2e_room_keys(room_id); +CREATE UNIQUE INDEX e2e_room_keys_session_idx ON e2e_room_keys(session_id); + +-- the versioning metadata about versions of users' encrypted e2e session backups +CREATE TABLE e2e_room_key_versions ( + user_id TEXT NOT NULL, + version INT NOT NULL, + algorithm TEXT NOT NULL, + dummy_session_data TEXT NOT NULL +); + +CREATE UNIQUE INDEX e2e_room_key_user_idx ON e2e_room_keys(user_id); -- cgit 1.4.1 From 0bc4627a731d0edd437905a5b07e85421c7553d8 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 5 Dec 2017 17:54:48 +0000 Subject: interim WIP checkin; doesn't build yet --- synapse/handlers/e2e_room_keys.py | 46 +++++++++++++++++++++---------- synapse/rest/client/v2_alpha/room_keys.py | 37 ++++++++++++++++++++++--- synapse/storage/e2e_room_keys.py | 20 ++++++++++++++ 3 files changed, 84 insertions(+), 19 deletions(-) diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 78c838a829..15e3beb5ed 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -20,8 +20,7 @@ from canonicaljson import encode_canonical_json from twisted.internet import defer from synapse.api.errors import SynapseError, CodeMessageException -from synapse.types import get_domain_from_id -from synapse.util.logcontext import preserve_fn, make_deferred_yieldable +from synapse.util.async import Linearizer from synapse.util.retryutils import NotRetryingDestination logger = logging.getLogger(__name__) @@ -30,31 +29,48 @@ logger = logging.getLogger(__name__) class E2eRoomKeysHandler(object): def __init__(self, hs): self.store = hs.get_datastore() + self._upload_linearizer = async.Linearizer("upload_room_keys_lock") @defer.inlineCallbacks def get_room_keys(self, user_id, version, room_id, session_id): results = yield self.store.get_e2e_room_keys(user_id, version, room_id, session_id) defer.returnValue(results) + @defer.inlineCallbacks + def delete_room_keys(self, user_id, version, room_id, session_id): + yield self.store.delete_e2e_room_keys(user_id, version, room_id, session_id) + @defer.inlineCallbacks def upload_room_keys(self, user_id, version, room_keys): # TODO: Validate the JSON to make sure it has the right keys. - # go through the room_keys - for room_id in room_keys['rooms']: - for session_id in room_keys['rooms'][room_id]['sessions']: - session = room_keys['rooms'][room_id]['sessions'][session_id] - - # get a lock + # XXX: perhaps we should use a finer grained lock here? 
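+        # (a single per-user Linearizer means a user's clients can only
+        # upload one batch of keys at a time, so the read-merge-write cycle
+        # below can't race with itself; a per-room or per-session lock would
+        # allow more concurrency at the cost of more bookkeeping)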
+ with (yield self._upload_linearizer.queue(user_id): - # get the room_key for this particular row - yield self.store.get_e2e_room_key() + # go through the room_keys + for room_id in room_keys['rooms']: + for session_id in room_keys['rooms'][room_id]['sessions']: + room_key = room_keys['rooms'][room_id]['sessions'][session_id] - # check whether we merge or not - if() + # get the room_key for this particular row + current_room_key = yield self.store.get_e2e_room_key( + user_id, version, room_id, session_id + ) - # if so, we set it - yield self.store.set_e2e_room_key() + # check whether we merge or not. spelling it out with if/elifs rather than + # lots of booleans for legibility. + replace = False + if current_room_key: + if room_key['is_verified'] and not current_room_key['is_verified']: + replace = True + elif room_key['first_message_index'] < current_room_key['first_message_index']: + replace = True + elif room_key['forwarded_count'] < room_key['forwarded_count']: + replace = True - # release the lock + # if so, we set the new room_key + if replace: + yield self.store.set_e2e_room_key( + user_id, version, room_id, session_id, room_key + ) diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 9b93001919..7291018a48 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -28,7 +28,7 @@ from ._base import client_v2_patterns logger = logging.getLogger(__name__) -class RoomKeysUploadServlet(RestServlet): +class RoomKeysServlet(RestServlet): PATTERNS = client_v2_patterns("/room_keys/keys(/(?P[^/]+))?(/(?P[^/]+))?$") def __init__(self, hs): @@ -41,16 +41,45 @@ class RoomKeysUploadServlet(RestServlet): self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() @defer.inlineCallbacks - def on_POST(self, request, room_id, session_id): - requester = yield self.auth.get_user_by_req(request, allow_guest=True) + def on_PUT(self, request, room_id, session_id): + requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() body = parse_json_object_from_request(request) + version = request.args.get("version", None) + + if session_id: + body = { "sessions": { session_id : body } } + + if room_id: + body = { "rooms": { room_id : body } } result = yield self.e2e_room_keys_handler.upload_room_keys( user_id, version, body ) defer.returnValue((200, result)) + @defer.inlineCallbacks + def on_GET(self, request, room_id, session_id): + requester = yield self.auth.get_user_by_req(request, allow_guest=False) + user_id = requester.user.to_string() + version = request.args.get("version", None) + + room_keys = yield self.e2e_room_keys_handler.get_room_keys( + user_id, version, room_id, session_id + ) + defer.returnValue((200, room_keys)) + + @defer.inlineCallbacks + def on_DELETE(self, request, room_id, session_id): + requester = yield self.auth.get_user_by_req(request, allow_guest=False) + user_id = requester.user.to_string() + version = request.args.get("version", None) + + yield self.e2e_room_keys_handler.delete_room_keys( + user_id, version, room_id, session_id + ) + defer.returnValue((200, {})) + def register_servlets(hs, http_server): - RoomKeysUploadServlet(hs).register(http_server) + RoomKeysServlet(hs).register(http_server) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 9f6d47e1b6..903dc083f8 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + from twisted.internet import defer from synapse.util.caches.descriptors import cached @@ -77,6 +78,9 @@ class EndToEndRoomKeyStore(SQLBaseStore): ) + # XXX: this isn't currently used and isn't tested anywhere + # it could be used in future for bulk-uploading new versions of room_keys + # for a user or something though. def set_e2e_room_keys(self, user_id, version, room_keys): def _set_e2e_room_keys_txn(txn): @@ -131,3 +135,19 @@ class EndToEndRoomKeyStore(SQLBaseStore): sessions = {} sessions['rooms'][roomId]['sessions'][session_id] = row for row in rows; defer.returnValue(sessions); + + @defer.inlineCallbacks + def delete_e2e_room_keys(self, user_id, version, room_id, session_id): + + keyvalues={ + "user_id": user_id, + "version": version, + } + if room_id: keyvalues['room_id'] = room_id + if session_id: keyvalues['session_id'] = session_id + + yield self._simple_delete( + table="e2e_room_keys", + keyvalues=keyvalues, + desc="delete_e2e_room_keys", + ) -- cgit 1.4.1 From 6b8c07abc293bd222051e769550753bc0fd6f667 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 5 Dec 2017 21:44:25 +0000 Subject: make it work and fix pep8 --- synapse/handlers/e2e_room_keys.py | 69 +++++++++------ synapse/rest/__init__.py | 2 + synapse/rest/client/v2_alpha/room_keys.py | 33 ++++--- synapse/server.py | 5 ++ synapse/storage/__init__.py | 2 + synapse/storage/e2e_room_keys.py | 103 +++++++++++++--------- synapse/storage/schema/delta/46/e2e_room_keys.sql | 2 +- 7 files changed, 134 insertions(+), 82 deletions(-) diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 15e3beb5ed..93f4ad5194 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -13,15 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -import ujson as json import logging -from canonicaljson import encode_canonical_json from twisted.internet import defer -from synapse.api.errors import SynapseError, CodeMessageException +from synapse.api.errors import StoreError from synapse.util.async import Linearizer -from synapse.util.retryutils import NotRetryingDestination logger = logging.getLogger(__name__) @@ -29,11 +26,13 @@ logger = logging.getLogger(__name__) class E2eRoomKeysHandler(object): def __init__(self, hs): self.store = hs.get_datastore() - self._upload_linearizer = async.Linearizer("upload_room_keys_lock") + self._upload_linearizer = Linearizer("upload_room_keys_lock") @defer.inlineCallbacks def get_room_keys(self, user_id, version, room_id, session_id): - results = yield self.store.get_e2e_room_keys(user_id, version, room_id, session_id) + results = yield self.store.get_e2e_room_keys( + user_id, version, room_id, session_id + ) defer.returnValue(results) @defer.inlineCallbacks @@ -46,31 +45,49 @@ class E2eRoomKeysHandler(object): # TODO: Validate the JSON to make sure it has the right keys. # XXX: perhaps we should use a finer grained lock here? 
- with (yield self._upload_linearizer.queue(user_id): + with (yield self._upload_linearizer.queue(user_id)): # go through the room_keys for room_id in room_keys['rooms']: for session_id in room_keys['rooms'][room_id]['sessions']: room_key = room_keys['rooms'][room_id]['sessions'][session_id] - # get the room_key for this particular row - current_room_key = yield self.store.get_e2e_room_key( - user_id, version, room_id, session_id + yield self._upload_room_key( + user_id, version, room_id, session_id, room_key ) - # check whether we merge or not. spelling it out with if/elifs rather than - # lots of booleans for legibility. - replace = False - if current_room_key: - if room_key['is_verified'] and not current_room_key['is_verified']: - replace = True - elif room_key['first_message_index'] < current_room_key['first_message_index']: - replace = True - elif room_key['forwarded_count'] < room_key['forwarded_count']: - replace = True - - # if so, we set the new room_key - if replace: - yield self.store.set_e2e_room_key( - user_id, version, room_id, session_id, room_key - ) + @defer.inlineCallbacks + def _upload_room_key(self, user_id, version, room_id, session_id, room_key): + # get the room_key for this particular row + current_room_key = None + try: + current_room_key = yield self.store.get_e2e_room_key( + user_id, version, room_id, session_id + ) + except StoreError as e: + if e.code == 404: + pass + else: + raise + + # check whether we merge or not. spelling it out with if/elifs rather + # than lots of booleans for legibility. + upsert = True + if current_room_key: + if room_key['is_verified'] and not current_room_key['is_verified']: + pass + elif ( + room_key['first_message_index'] < + current_room_key['first_message_index'] + ): + pass + elif room_key['forwarded_count'] < room_key['forwarded_count']: + pass + else: + upsert = False + + # if so, we set the new room_key + if upsert: + yield self.store.set_e2e_room_key( + user_id, version, room_id, session_id, room_key + ) diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 3418f06fd6..4856822a5d 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -46,6 +46,7 @@ from synapse.rest.client.v2_alpha import ( receipts, register, report_event, + room_keys, sendtodevice, sync, tags, @@ -102,6 +103,7 @@ class ClientRestResource(JsonResource): auth.register_servlets(hs, client_resource) receipts.register_servlets(hs, client_resource) read_marker.register_servlets(hs, client_resource) + room_keys.register_servlets(hs, client_resource) keys.register_servlets(hs, client_resource) tokenrefresh.register_servlets(hs, client_resource) tags.register_servlets(hs, client_resource) diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 7291018a48..010aed98f9 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -17,26 +17,25 @@ import logging from twisted.internet import defer -from synapse.api.errors import SynapseError from synapse.http.servlet import ( - RestServlet, parse_json_object_from_request, parse_integer + RestServlet, parse_json_object_from_request ) -from synapse.http.servlet import parse_string -from synapse.types import StreamToken from ._base import client_v2_patterns logger = logging.getLogger(__name__) class RoomKeysServlet(RestServlet): - PATTERNS = client_v2_patterns("/room_keys/keys(/(?P[^/]+))?(/(?P[^/]+))?$") + PATTERNS = client_v2_patterns( + "/room_keys/keys(/(?P[^/]+))?(/(?P[^/]+))?$" + ) def 
__init__(self, hs): """ Args: hs (synapse.server.HomeServer): server """ - super(RoomKeysUploadServlet, self).__init__() + super(RoomKeysServlet, self).__init__() self.auth = hs.get_auth() self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() @@ -45,24 +44,32 @@ class RoomKeysServlet(RestServlet): requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() body = parse_json_object_from_request(request) - version = request.args.get("version", None) + version = request.args.get("version")[0] if session_id: - body = { "sessions": { session_id : body } } + body = { + "sessions": { + session_id: body + } + } if room_id: - body = { "rooms": { room_id : body } } + body = { + "rooms": { + room_id: body + } + } - result = yield self.e2e_room_keys_handler.upload_room_keys( + yield self.e2e_room_keys_handler.upload_room_keys( user_id, version, body ) - defer.returnValue((200, result)) + defer.returnValue((200, {})) @defer.inlineCallbacks def on_GET(self, request, room_id, session_id): requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() - version = request.args.get("version", None) + version = request.args.get("version")[0] room_keys = yield self.e2e_room_keys_handler.get_room_keys( user_id, version, room_id, session_id @@ -73,7 +80,7 @@ class RoomKeysServlet(RestServlet): def on_DELETE(self, request, room_id, session_id): requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() - version = request.args.get("version", None) + version = request.args.get("version")[0] yield self.e2e_room_keys_handler.delete_room_keys( user_id, version, room_id, session_id diff --git a/synapse/server.py b/synapse/server.py index 140be9ebe8..706cb1361f 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -49,6 +49,7 @@ from synapse.handlers.deactivate_account import DeactivateAccountHandler from synapse.handlers.device import DeviceHandler from synapse.handlers.devicemessage import DeviceMessageHandler from synapse.handlers.e2e_keys import E2eKeysHandler +from synapse.handlers.e2e_room_keys import E2eRoomKeysHandler from synapse.handlers.events import EventHandler, EventStreamHandler from synapse.handlers.groups_local import GroupsLocalHandler from synapse.handlers.initial_sync import InitialSyncHandler @@ -127,6 +128,7 @@ class HomeServer(object): 'auth_handler', 'device_handler', 'e2e_keys_handler', + 'e2e_room_keys_handler', 'event_handler', 'event_stream_handler', 'initial_sync_handler', @@ -288,6 +290,9 @@ class HomeServer(object): def build_e2e_keys_handler(self): return E2eKeysHandler(self) + def build_e2e_room_keys_handler(self): + return E2eRoomKeysHandler(self) + def build_application_service_api(self): return ApplicationServiceApi(self) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 134e4a80f1..69cb28268a 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -30,6 +30,7 @@ from .appservice import ApplicationServiceStore, ApplicationServiceTransactionSt from .client_ips import ClientIpStore from .deviceinbox import DeviceInboxStore from .directory import DirectoryStore +from .e2e_room_keys import EndToEndRoomKeyStore from .end_to_end_keys import EndToEndKeyStore from .engines import PostgresEngine from .event_federation import EventFederationStore @@ -76,6 +77,7 @@ class DataStore(RoomMemberStore, RoomStore, ApplicationServiceTransactionStore, ReceiptsStore, EndToEndKeyStore, + 
EndToEndRoomKeyStore, SearchStore, TagsStore, AccountDataStore, diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 903dc083f8..5982710bd5 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -15,11 +15,6 @@ from twisted.internet import defer -from synapse.util.caches.descriptors import cached - -from canonicaljson import encode_canonical_json -import ujson as json - from ._base import SQLBaseStore @@ -45,29 +40,27 @@ class EndToEndRoomKeyStore(SQLBaseStore): desc="get_e2e_room_key", ) - defer.returnValue(row); + defer.returnValue(row) def set_e2e_room_key(self, user_id, version, room_id, session_id, room_key): def _set_e2e_room_key_txn(txn): - self._simple_upsert( + self._simple_upsert_txn( txn, table="e2e_room_keys", keyvalues={ "user_id": user_id, "room_id": room_id, - "session_id": session_id, - } - values=[ - { - "version": version, - "first_message_index": room_key['first_message_index'], - "forwarded_count": room_key['forwarded_count'], - "is_verified": room_key['is_verified'], - "session_data": room_key['session_data'], - } - ], + "session_id": session_id, + }, + values={ + "version": version, + "first_message_index": room_key['first_message_index'], + "forwarded_count": room_key['forwarded_count'], + "is_verified": room_key['is_verified'], + "session_data": room_key['session_data'], + }, lock=False, ) @@ -77,7 +70,6 @@ class EndToEndRoomKeyStore(SQLBaseStore): "set_e2e_room_key", _set_e2e_room_key_txn ) - # XXX: this isn't currently used and isn't tested anywhere # it could be used in future for bulk-uploading new versions of room_keys # for a user or something though. @@ -85,23 +77,27 @@ class EndToEndRoomKeyStore(SQLBaseStore): def _set_e2e_room_keys_txn(txn): + values = [] + for room_id in room_keys['rooms']: + for session_id in room_keys['rooms'][room_id]['sessions']: + session = room_keys['rooms'][room_id]['sessions'][session_id] + values.append( + { + "user_id": user_id, + "room_id": room_id, + "session_id": session_id, + "version": version, + "first_message_index": session['first_message_index'], + "forwarded_count": session['forwarded_count'], + "is_verified": session['is_verified'], + "session_data": session['session_data'], + } + ) + self._simple_insert_many_txn( txn, table="e2e_room_keys", - values=[ - { - "user_id": user_id, - "room_id": room_id, - "session_id": session_id, - "version": version, - "first_message_index": room_keys['rooms'][room_id]['sessions'][session_id]['first_message_index'], - "forwarded_count": room_keys['rooms'][room_id]['sessions'][session_id]['forwarded_count'], - "is_verified": room_keys['rooms'][room_id]['sessions'][session_id]['is_verified'], - "session_data": room_keys['rooms'][room_id]['sessions'][session_id]['session_data'], - } - for session_id in room_keys['rooms'][room_id]['sessions'] - for room_id in room_keys['rooms'] - ] + values=values ) return True @@ -113,17 +109,22 @@ class EndToEndRoomKeyStore(SQLBaseStore): @defer.inlineCallbacks def get_e2e_room_keys(self, user_id, version, room_id, session_id): - keyvalues={ + keyvalues = { "user_id": user_id, "version": version, } - if room_id: keyvalues['room_id'] = room_id - if session_id: keyvalues['session_id'] = session_id + if room_id: + keyvalues['room_id'] = room_id + if session_id: + keyvalues['session_id'] = session_id rows = yield self._simple_select_list( table="e2e_room_keys", keyvalues=keyvalues, retcols=( + "user_id", + "room_id", + "session_id", "first_message_index", "forwarded_count", "is_verified", @@ 
-132,19 +133,37 @@ class EndToEndRoomKeyStore(SQLBaseStore): desc="get_e2e_room_keys", ) - sessions = {} - sessions['rooms'][roomId]['sessions'][session_id] = row for row in rows; - defer.returnValue(sessions); + # perlesque autovivification from https://stackoverflow.com/a/19829714/6764493 + class AutoVivification(dict): + def __getitem__(self, item): + try: + return dict.__getitem__(self, item) + except KeyError: + value = self[item] = type(self)() + return value + + sessions = AutoVivification() + for row in rows: + sessions['rooms'][row['room_id']]['sessions'][row['session_id']] = { + "first_message_index": row["first_message_index"], + "forwarded_count": row["forwarded_count"], + "is_verified": row["is_verified"], + "session_data": row["session_data"], + } + + defer.returnValue(sessions) @defer.inlineCallbacks def delete_e2e_room_keys(self, user_id, version, room_id, session_id): - keyvalues={ + keyvalues = { "user_id": user_id, "version": version, } - if room_id: keyvalues['room_id'] = room_id - if session_id: keyvalues['session_id'] = session_id + if room_id: + keyvalues['room_id'] = room_id + if session_id: + keyvalues['session_id'] = session_id yield self._simple_delete( table="e2e_room_keys", diff --git a/synapse/storage/schema/delta/46/e2e_room_keys.sql b/synapse/storage/schema/delta/46/e2e_room_keys.sql index 51b826e8b3..6b344c5ad7 100644 --- a/synapse/storage/schema/delta/46/e2e_room_keys.sql +++ b/synapse/storage/schema/delta/46/e2e_room_keys.sql @@ -29,7 +29,7 @@ CREATE UNIQUE INDEX e2e_room_keys_user_idx ON e2e_room_keys(user_id); CREATE UNIQUE INDEX e2e_room_keys_room_idx ON e2e_room_keys(room_id); CREATE UNIQUE INDEX e2e_room_keys_session_idx ON e2e_room_keys(session_id); --- the versioning metadata about versions of users' encrypted e2e session backups +-- the metadata for each generation of encrypted e2e session backups CREATE TABLE e2e_room_key_versions ( user_id TEXT NOT NULL, version INT NOT NULL, -- cgit 1.4.1 From cf1e2000f623afea8f3afb58e4a7659288c45777 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 5 Dec 2017 23:06:43 +0000 Subject: document the API --- synapse/rest/client/v2_alpha/room_keys.py | 133 ++++++++++++++++++++++++++++++ 1 file changed, 133 insertions(+) diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 010aed98f9..be82eccb2b 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -41,6 +41,79 @@ class RoomKeysServlet(RestServlet): @defer.inlineCallbacks def on_PUT(self, request, room_id, session_id): + """ + Uploads one or more encrypted E2E room keys for backup purposes. + room_id: the ID of the room the keys are for (optional) + session_id: the ID for the E2E room keys for the room (optional) + version: the version of the user's backup which this data is for. + the version must already have been created via the /change_secret API. + + Each session has: + * first_message_index: a numeric index indicating the oldest message + encrypted by this session. + * forwarded_count: how many times the uploading client claims this key + has been shared (forwarded) + * is_verified: whether the client that uploaded the keys claims they + were sent by a device which they've verified + * session_data: base64-encrypted data describing the session. + + Returns 200 OK on success with body {} + + The API is designed to be otherwise agnostic to the room_key encryption + algorithm being used. 
Sessions are merged with existing ones in the + backup using the heuristics: + * is_verified sessions always win over unverified sessions + * older first_message_index always win over newer sessions + * lower forwarded_count always wins over higher forwarded_count + + We trust the clients not to lie and corrupt their own backups. + + POST /room_keys/keys/!abc:matrix.org/c0ff33?version=1 HTTP/1.1 + Content-Type: application/json + + { + "first_message_index": 1, + "forwarded_count": 1, + "is_verified": false, + "session_data": "SSBBTSBBIEZJU0gK" + } + + Or... + + POST /room_keys/keys/!abc:matrix.org?version=1 HTTP/1.1 + Content-Type: application/json + + { + "sessions": { + "c0ff33": { + "first_message_index": 1, + "forwarded_count": 1, + "is_verified": false, + "session_data": "SSBBTSBBIEZJU0gK" + } + } + } + + Or... + + POST /room_keys/keys?version=1 HTTP/1.1 + Content-Type: application/json + + { + "rooms": { + "!abc:matrix.org": { + "sessions": { + "c0ff33": { + "first_message_index": 1, + "forwarded_count": 1, + "is_verified": false, + "session_data": "SSBBTSBBIEZJU0gK" + } + } + } + } + } + """ requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() body = parse_json_object_from_request(request) @@ -67,6 +140,57 @@ class RoomKeysServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request, room_id, session_id): + """ + Retrieves one or more encrypted E2E room keys for backup purposes. + Symmetric with the PUT version of the API. + + room_id: the ID of the room to retrieve the keys for (optional) + session_id: the ID for the E2E room keys to retrieve the keys for (optional) + version: the version of the user's backup which this data is for. + the version must already have been created via the /change_secret API. + + Returns as follows: + + GET /room_keys/keys/!abc:matrix.org/c0ff33?version=1 HTTP/1.1 + { + "first_message_index": 1, + "forwarded_count": 1, + "is_verified": false, + "session_data": "SSBBTSBBIEZJU0gK" + } + + Or... + + GET /room_keys/keys/!abc:matrix.org?version=1 HTTP/1.1 + { + "sessions": { + "c0ff33": { + "first_message_index": 1, + "forwarded_count": 1, + "is_verified": false, + "session_data": "SSBBTSBBIEZJU0gK" + } + } + } + + Or... + + GET /room_keys/keys?version=1 HTTP/1.1 + { + "rooms": { + "!abc:matrix.org": { + "sessions": { + "c0ff33": { + "first_message_index": 1, + "forwarded_count": 1, + "is_verified": false, + "session_data": "SSBBTSBBIEZJU0gK" + } + } + } + } + } + """ requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() version = request.args.get("version")[0] @@ -78,6 +202,15 @@ class RoomKeysServlet(RestServlet): @defer.inlineCallbacks def on_DELETE(self, request, room_id, session_id): + """ + Deletes one or more encrypted E2E room keys for a user for backup purposes. + + room_id: the ID of the room whose keys to delete (optional) + session_id: the ID for the E2E session to delete (optional) + version: the version of the user's backup which this data is for. + the version must already have been created via the /change_secret API. 
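+
+        Returns 200 OK on success with body {}. For example, deleting the
+        backed-up keys for a single session:
+
+        DELETE /room_keys/keys/!abc:matrix.org/c0ff33?version=1 HTTP/1.1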
+ """ + requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() version = request.args.get("version")[0] -- cgit 1.4.1 From 8ae64b270f7742abfe4cf0b8d140c81993464ea4 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 6 Dec 2017 01:02:57 +0000 Subject: implement /room_keys/version too (untested) --- synapse/api/errors.py | 25 ++++++++++ synapse/handlers/e2e_room_keys.py | 47 +++++++++++++++++-- synapse/rest/client/v2_alpha/room_keys.py | 47 +++++++++++++++++++ synapse/storage/e2e_room_keys.py | 56 +++++++++++++++++++++++ synapse/storage/schema/delta/46/e2e_room_keys.sql | 2 +- 5 files changed, 171 insertions(+), 6 deletions(-) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index b41d595059..8c97e91ba1 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -56,6 +56,7 @@ class Codes(object): CONSENT_NOT_GIVEN = "M_CONSENT_NOT_GIVEN" CANNOT_LEAVE_SERVER_NOTICE_ROOM = "M_CANNOT_LEAVE_SERVER_NOTICE_ROOM" MAU_LIMIT_EXCEEDED = "M_MAU_LIMIT_EXCEEDED" + WRONG_ROOM_KEYS_VERSION = "M_WRONG_ROOM_KEYS_VERSION" class CodeMessageException(RuntimeError): @@ -285,6 +286,30 @@ class LimitExceededError(SynapseError): ) +class RoomKeysVersionError(SynapseError): + """A client has tried to upload to a non-current version of the room_keys store + """ + def __init__(self, code=403, msg="Wrong room_keys version", current_version=None, + errcode=Codes.WRONG_ROOM_KEYS_VERSION): + super(RoomKeysVersionError, self).__init__(code, msg, errcode) + self.current_version = current_version + + def error_dict(self): + return cs_error( + self.msg, + self.errcode, + current_version=self.current_version, + ) + + +def cs_exception(exception): + if isinstance(exception, CodeMessageException): + return exception.error_dict() + else: + logger.error("Unknown exception type: %s", type(exception)) + return {} + + def cs_error(msg, code=Codes.UNKNOWN, **kwargs): """ Utility method for constructing an error response for client-server interactions. diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 93f4ad5194..4333ca610c 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -17,7 +17,7 @@ import logging from twisted.internet import defer -from synapse.api.errors import StoreError +from synapse.api.errors import StoreError, SynapseError, RoomKeysVersionError from synapse.util.async import Linearizer logger = logging.getLogger(__name__) @@ -30,10 +30,13 @@ class E2eRoomKeysHandler(object): @defer.inlineCallbacks def get_room_keys(self, user_id, version, room_id, session_id): - results = yield self.store.get_e2e_room_keys( - user_id, version, room_id, session_id - ) - defer.returnValue(results) + # we deliberately take the lock to get keys so that changing the version + # works atomically + with (yield self._upload_linearizer.queue(user_id)): + results = yield self.store.get_e2e_room_keys( + user_id, version, room_id, session_id + ) + defer.returnValue(results) @defer.inlineCallbacks def delete_room_keys(self, user_id, version, room_id, session_id): @@ -44,6 +47,16 @@ class E2eRoomKeysHandler(object): # TODO: Validate the JSON to make sure it has the right keys. 
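+        # (e.g. a validator would check for a top-level 'rooms' dict, and
+        # for 'first_message_index', 'forwarded_count', 'is_verified' and
+        # 'session_data' on each session, before anything touches the store)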
+ # Check that the version we're trying to upload is the current version + try: + version_info = yield self.get_version_info(user_id, version) + except StoreError as e: + if e.code == 404: + raise SynapseError(404, "Version '%d' not found" % (version,)) + + if version_info.version != version: + raise RoomKeysVersionError(current_version=version_info.version) + # XXX: perhaps we should use a finer grained lock here? with (yield self._upload_linearizer.queue(user_id)): @@ -91,3 +104,27 @@ class E2eRoomKeysHandler(object): yield self.store.set_e2e_room_key( user_id, version, room_id, session_id, room_key ) + + @defer.inlineCallbacks + def create_version(self, user_id, version, version_info): + + # TODO: Validate the JSON to make sure it has the right keys. + + # lock everyone out until we've switched version + with (yield self._upload_linearizer.queue(user_id)): + yield self.store.create_version( + user_id, version, version_info + ) + + @defer.inlineCallbacks + def get_version_info(self, user_id, version): + with (yield self._upload_linearizer.queue(user_id)): + results = yield self.store.get_e2e_room_key_version( + user_id, version + ) + defer.returnValue(results) + + @defer.inlineCallbacks + def delete_version(self, user_id, version): + with (yield self._upload_linearizer.queue(user_id)): + yield self.store.delete_e2e_room_key_version(user_id, version) diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index be82eccb2b..4d76e1d824 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -221,5 +221,52 @@ class RoomKeysServlet(RestServlet): defer.returnValue((200, {})) +class RoomKeysVersionServlet(RestServlet): + PATTERNS = client_v2_patterns( + "/room_keys/version(/(?P[^/]+))?$" + ) + + def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ + super(RoomKeysVersionServlet, self).__init__() + self.auth = hs.get_auth() + self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() + + @defer.inlineCallbacks + def on_POST(self, request, version): + requester = yield self.auth.get_user_by_req(request, allow_guest=False) + user_id = requester.user.to_string() + info = parse_json_object_from_request(request) + + new_version = yield self.e2e_room_keys_handler.create_version( + user_id, version, info + ) + defer.returnValue((200, {"version": new_version})) + + @defer.inlineCallbacks + def on_GET(self, request, version): + requester = yield self.auth.get_user_by_req(request, allow_guest=False) + user_id = requester.user.to_string() + + info = yield self.e2e_room_keys_handler.get_version_info( + user_id, version + ) + defer.returnValue((200, info)) + + @defer.inlineCallbacks + def on_DELETE(self, request, version): + requester = yield self.auth.get_user_by_req(request, allow_guest=False) + user_id = requester.user.to_string() + + yield self.e2e_room_keys_handler.delete_version( + user_id, version + ) + defer.returnValue((200, {})) + + def register_servlets(hs, http_server): RoomKeysServlet(hs).register(http_server) + RoomKeysVersionServlet(hs).register(http_server) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 5982710bd5..994878acf6 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -170,3 +170,59 @@ class EndToEndRoomKeyStore(SQLBaseStore): keyvalues=keyvalues, desc="delete_e2e_room_keys", ) + + @defer.inlineCallbacks + def get_e2e_room_key_version(self, user_id, version): + + row = yield 
self._simple_select_one( + table="e2e_room_key_versions", + keyvalues={ + "user_id": user_id, + "version": version, + }, + retcols=( + "user_id", + "version", + "algorithm", + "auth_data", + ), + desc="get_e2e_room_key_version_info", + ) + + defer.returnValue(row) + + def create_e2e_room_key_version(self, user_id, version, info): + + def _create_e2e_room_key_version_txn(txn): + + self._simple_insert_txn( + txn, + table="e2e_room_key_versions", + values={ + "user_id": user_id, + "version": version, + "algorithm": info["algorithm"], + "auth_data": info["auth_data"], + }, + lock=False, + ) + + return True + + return self.runInteraction( + "create_e2e_room_key_version_txn", _create_e2e_room_key_version_txn + ) + + @defer.inlineCallbacks + def delete_e2e_room_key_version(self, user_id, version): + + keyvalues = { + "user_id": user_id, + "version": version, + } + + yield self._simple_delete( + table="e2e_room_key_versions", + keyvalues=keyvalues, + desc="delete_e2e_room_key_version", + ) diff --git a/synapse/storage/schema/delta/46/e2e_room_keys.sql b/synapse/storage/schema/delta/46/e2e_room_keys.sql index 6b344c5ad7..463f828c66 100644 --- a/synapse/storage/schema/delta/46/e2e_room_keys.sql +++ b/synapse/storage/schema/delta/46/e2e_room_keys.sql @@ -34,7 +34,7 @@ CREATE TABLE e2e_room_key_versions ( user_id TEXT NOT NULL, version INT NOT NULL, algorithm TEXT NOT NULL, - dummy_session_data TEXT NOT NULL + auth_data TEXT NOT NULL ); CREATE UNIQUE INDEX e2e_room_key_user_idx ON e2e_room_keys(user_id); -- cgit 1.4.1 From 69e51c7ba48a84b48ab64c8c290a232d14193a18 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 6 Dec 2017 10:02:49 +0100 Subject: make /room_keys/version work --- synapse/handlers/e2e_room_keys.py | 18 +++++++++++------- synapse/rest/client/v2_alpha/room_keys.py | 9 ++++++++- synapse/storage/e2e_room_keys.py | 22 +++++++++++++++++----- synapse/storage/schema/delta/46/e2e_room_keys.sql | 4 ++-- 4 files changed, 38 insertions(+), 15 deletions(-) diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 4333ca610c..bd58be6558 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -48,13 +48,16 @@ class E2eRoomKeysHandler(object): # TODO: Validate the JSON to make sure it has the right keys. # Check that the version we're trying to upload is the current version + try: version_info = yield self.get_version_info(user_id, version) except StoreError as e: if e.code == 404: - raise SynapseError(404, "Version '%d' not found" % (version,)) + raise SynapseError(404, "Version '%s' not found" % (version,)) + else: + raise e - if version_info.version != version: + if version_info['version'] != version: raise RoomKeysVersionError(current_version=version_info.version) # XXX: perhaps we should use a finer grained lock here? @@ -81,7 +84,7 @@ class E2eRoomKeysHandler(object): if e.code == 404: pass else: - raise + raise e # check whether we merge or not. spelling it out with if/elifs rather # than lots of booleans for legibility. @@ -106,20 +109,21 @@ class E2eRoomKeysHandler(object): ) @defer.inlineCallbacks - def create_version(self, user_id, version, version_info): + def create_version(self, user_id, version_info): # TODO: Validate the JSON to make sure it has the right keys. 
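+        # (version_info is expected to carry at least the 'algorithm' and
+        # 'auth_data' fields which the store persists, e.g.
+        # {"algorithm": "<backup algorithm>", "auth_data": "<opaque blob>"})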
# lock everyone out until we've switched version with (yield self._upload_linearizer.queue(user_id)): - yield self.store.create_version( - user_id, version, version_info + new_version = yield self.store.create_e2e_room_key_version( + user_id, version_info ) + defer.returnValue(new_version) @defer.inlineCallbacks def get_version_info(self, user_id, version): with (yield self._upload_linearizer.queue(user_id)): - results = yield self.store.get_e2e_room_key_version( + results = yield self.store.get_e2e_room_key_version_info( user_id, version ) defer.returnValue(results) diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 4d76e1d824..128b732fb1 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -17,6 +17,7 @@ import logging from twisted.internet import defer +from synapse.api.errors import SynapseError from synapse.http.servlet import ( RestServlet, parse_json_object_from_request ) @@ -237,15 +238,21 @@ class RoomKeysVersionServlet(RestServlet): @defer.inlineCallbacks def on_POST(self, request, version): + if version: + raise SynapseError(405, "Cannot POST to a specific version") + requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() info = parse_json_object_from_request(request) new_version = yield self.e2e_room_keys_handler.create_version( - user_id, version, info + user_id, info ) defer.returnValue((200, {"version": new_version})) + # we deliberately don't have a PUT /version, as these things really should + # be immutable to avoid people footgunning + @defer.inlineCallbacks def on_GET(self, request, version): requester = yield self.auth.get_user_by_req(request, allow_guest=False) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 994878acf6..8efca11a8c 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -172,7 +172,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): ) @defer.inlineCallbacks - def get_e2e_room_key_version(self, user_id, version): + def get_e2e_room_key_version_info(self, user_id, version): row = yield self._simple_select_one( table="e2e_room_key_versions", @@ -191,23 +191,35 @@ class EndToEndRoomKeyStore(SQLBaseStore): defer.returnValue(row) - def create_e2e_room_key_version(self, user_id, version, info): + def create_e2e_room_key_version(self, user_id, info): + """Atomically creates a new version of this user's e2e_room_keys store + with the given version info. 
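+
+        Args:
+            user_id (str): the user whose e2e_room_keys backup we're creating
+                a version for
+            info (dict): the metadata for the new version, i.e. its algorithm
+                and auth_data
+
+        Returns:
+            A deferred which resolves to the new version number.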
+ """ def _create_e2e_room_key_version_txn(txn): + txn.execute( + "SELECT MAX(version) FROM e2e_room_key_versions WHERE user_id=?", + (user_id,) + ) + current_version = txn.fetchone()[0] + if current_version is None: + current_version = 0 + + new_version = current_version + 1 + self._simple_insert_txn( txn, table="e2e_room_key_versions", values={ "user_id": user_id, - "version": version, + "version": new_version, "algorithm": info["algorithm"], "auth_data": info["auth_data"], }, - lock=False, ) - return True + return new_version return self.runInteraction( "create_e2e_room_key_version_txn", _create_e2e_room_key_version_txn diff --git a/synapse/storage/schema/delta/46/e2e_room_keys.sql b/synapse/storage/schema/delta/46/e2e_room_keys.sql index 463f828c66..0d2a85fbe6 100644 --- a/synapse/storage/schema/delta/46/e2e_room_keys.sql +++ b/synapse/storage/schema/delta/46/e2e_room_keys.sql @@ -18,7 +18,7 @@ CREATE TABLE e2e_room_keys ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, session_id TEXT NOT NULL, - version INT NOT NULL, + version TEXT NOT NULL, first_message_index INT, forwarded_count INT, is_verified BOOLEAN, @@ -32,7 +32,7 @@ CREATE UNIQUE INDEX e2e_room_keys_session_idx ON e2e_room_keys(session_id); -- the metadata for each generation of encrypted e2e session backups CREATE TABLE e2e_room_key_versions ( user_id TEXT NOT NULL, - version INT NOT NULL, + version TEXT NOT NULL, algorithm TEXT NOT NULL, auth_data TEXT NOT NULL ); -- cgit 1.4.1 From 0abb205b47158a4160ddceb317c0245d640b6e3f Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Mon, 18 Dec 2017 01:52:46 +0000 Subject: blindly incorporate PR review - needs testing & fixing --- synapse/api/errors.py | 11 ++- synapse/handlers/e2e_room_keys.py | 88 +++++++++++++++-------- synapse/rest/client/v2_alpha/room_keys.py | 2 + synapse/storage/e2e_room_keys.py | 69 ++++++++---------- synapse/storage/schema/delta/46/e2e_room_keys.sql | 8 +-- 5 files changed, 99 insertions(+), 79 deletions(-) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 8c97e91ba1..d37bcb4082 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -289,9 +289,14 @@ class LimitExceededError(SynapseError): class RoomKeysVersionError(SynapseError): """A client has tried to upload to a non-current version of the room_keys store """ - def __init__(self, code=403, msg="Wrong room_keys version", current_version=None, - errcode=Codes.WRONG_ROOM_KEYS_VERSION): - super(RoomKeysVersionError, self).__init__(code, msg, errcode) + def __init__(self, current_version): + """ + Args: + current_version (str): the current version of the store they should have used + """ + super(RoomKeysVersionError, self).__init__( + 403, "Wrong room_keys version", Codes.WRONG_ROOM_KEYS_VERSION + ) self.current_version = current_version def error_dict(self): diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index bd58be6558..dda31fdd24 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -24,8 +24,21 @@ logger = logging.getLogger(__name__) class E2eRoomKeysHandler(object): + """ + Implements an optional realtime backup mechanism for encrypted E2E megolm room keys. + This gives a way for users to store and recover their megolm keys if they lose all + their clients. It should also extend easily to future room key mechanisms. + The actual payload of the encrypted keys is completely opaque to the handler. 
+ """ + def __init__(self, hs): self.store = hs.get_datastore() + + # Used to lock whenever a client is uploading key data. This prevents collisions + # between clients trying to upload the details of a new session, given all + # clients belonging to a user will receive and try to upload a new session at + # roughly the same time. Also used to lock out uploads when the key is being + # changed. self._upload_linearizer = Linearizer("upload_room_keys_lock") @defer.inlineCallbacks @@ -40,33 +53,34 @@ class E2eRoomKeysHandler(object): @defer.inlineCallbacks def delete_room_keys(self, user_id, version, room_id, session_id): - yield self.store.delete_e2e_room_keys(user_id, version, room_id, session_id) + # lock for consistency with uploading + with (yield self._upload_linearizer.queue(user_id)): + yield self.store.delete_e2e_room_keys(user_id, version, room_id, session_id) @defer.inlineCallbacks def upload_room_keys(self, user_id, version, room_keys): # TODO: Validate the JSON to make sure it has the right keys. - # Check that the version we're trying to upload is the current version - - try: - version_info = yield self.get_version_info(user_id, version) - except StoreError as e: - if e.code == 404: - raise SynapseError(404, "Version '%s' not found" % (version,)) - else: - raise e - - if version_info['version'] != version: - raise RoomKeysVersionError(current_version=version_info.version) - # XXX: perhaps we should use a finer grained lock here? with (yield self._upload_linearizer.queue(user_id)): - - # go through the room_keys - for room_id in room_keys['rooms']: - for session_id in room_keys['rooms'][room_id]['sessions']: - room_key = room_keys['rooms'][room_id]['sessions'][session_id] + # Check that the version we're trying to upload is the current version + try: + version_info = yield self.get_version_info(user_id, version) + except StoreError as e: + if e.code == 404: + raise SynapseError(404, "Version '%s' not found" % (version,)) + else: + raise e + + if version_info['version'] != version: + raise RoomKeysVersionError(current_version=version_info.version) + + # go through the room_keys. + # XXX: this should/could be done concurrently, given we're in a lock. + for room_id, room in room_keys['rooms'].iteritems(): + for session_id, session in room['sessions'].iteritems(): + room_key = session[session_id] yield self._upload_room_key( user_id, version, room_id, session_id, room_key @@ -86,10 +100,29 @@ class E2eRoomKeysHandler(object): else: raise e - # check whether we merge or not. spelling it out with if/elifs rather - # than lots of booleans for legibility. - upsert = True + if _should_replace_room_key(current_room_key, room_key): + yield self.store.set_e2e_room_key( + user_id, version, room_id, session_id, room_key + ) + + def _should_replace_room_key(current_room_key, room_key): + """ + Determine whether to replace the current_room_key in our backup for this + session (if any) with a new room_key that has been uploaded. + + Args: + current_room_key (dict): Optional, the current room_key dict if any + room_key (dict): The new room_key dict which may or may not be fit to + replace the current_room_key + + Returns: + True if current_room_key should be replaced by room_key in the backup + """ + if current_room_key: + # spelt out with if/elifs rather than nested boolean expressions + # purely for legibility. 
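+            # each `pass` here falls through to the final `return True`,
+            # i.e. the new key wins; we only keep the existing key if none
+            # of the criteria below hold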
+ if room_key['is_verified'] and not current_room_key['is_verified']: pass elif ( @@ -97,16 +130,11 @@ class E2eRoomKeysHandler(object): current_room_key['first_message_index'] ): pass - elif room_key['forwarded_count'] < room_key['forwarded_count']: + elif room_key['forwarded_count'] < current_room_key['forwarded_count']: pass else: - upsert = False - - # if so, we set the new room_key - if upsert: - yield self.store.set_e2e_room_key( - user_id, version, room_id, session_id, room_key - ) + return False + return True @defer.inlineCallbacks def create_version(self, user_id, version_info): diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 128b732fb1..70b7b4573f 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -68,6 +68,8 @@ class RoomKeysServlet(RestServlet): * lower forwarded_count always wins over higher forwarded_count We trust the clients not to lie and corrupt their own backups. + It also means that if your access_token is stolen, the attacker could + delete your backup. POST /room_keys/keys/!abc:matrix.org/c0ff33?version=1 HTTP/1.1 Content-Type: application/json diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 8efca11a8c..c11417c415 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -44,30 +44,21 @@ class EndToEndRoomKeyStore(SQLBaseStore): def set_e2e_room_key(self, user_id, version, room_id, session_id, room_key): - def _set_e2e_room_key_txn(txn): - - self._simple_upsert_txn( - txn, - table="e2e_room_keys", - keyvalues={ - "user_id": user_id, - "room_id": room_id, - "session_id": session_id, - }, - values={ - "version": version, - "first_message_index": room_key['first_message_index'], - "forwarded_count": room_key['forwarded_count'], - "is_verified": room_key['is_verified'], - "session_data": room_key['session_data'], - }, - lock=False, - ) - - return True - - return self.runInteraction( - "set_e2e_room_key", _set_e2e_room_key_txn + yield self._simple_upsert( + table="e2e_room_keys", + keyvalues={ + "user_id": user_id, + "room_id": room_id, + "session_id": session_id, + }, + values={ + "version": version, + "first_message_index": room_key['first_message_index'], + "forwarded_count": room_key['forwarded_count'], + "is_verified": room_key['is_verified'], + "session_data": room_key['session_data'], + }, + lock=False, ) # XXX: this isn't currently used and isn't tested anywhere @@ -107,7 +98,9 @@ class EndToEndRoomKeyStore(SQLBaseStore): ) @defer.inlineCallbacks - def get_e2e_room_keys(self, user_id, version, room_id, session_id): + def get_e2e_room_keys( + self, user_id, version, room_id=room_id, session_id=session_id + ): keyvalues = { "user_id": user_id, @@ -115,8 +108,8 @@ class EndToEndRoomKeyStore(SQLBaseStore): } if room_id: keyvalues['room_id'] = room_id - if session_id: - keyvalues['session_id'] = session_id + if session_id: + keyvalues['session_id'] = session_id rows = yield self._simple_select_list( table="e2e_room_keys", @@ -133,18 +126,10 @@ class EndToEndRoomKeyStore(SQLBaseStore): desc="get_e2e_room_keys", ) - # perlesque autovivification from https://stackoverflow.com/a/19829714/6764493 - class AutoVivification(dict): - def __getitem__(self, item): - try: - return dict.__getitem__(self, item) - except KeyError: - value = self[item] = type(self)() - return value - - sessions = AutoVivification() + sessions = {} for row in rows: - 
sessions['rooms'][row['room_id']]['sessions'][row['session_id']] = { + room_entry = sessions['rooms'].setdefault(row['room_id'], {"sessions": {}}) + room_entry['sessions'][row['session_id']] = { "first_message_index": row["first_message_index"], "forwarded_count": row["forwarded_count"], "is_verified": row["is_verified"], @@ -154,7 +139,9 @@ class EndToEndRoomKeyStore(SQLBaseStore): defer.returnValue(sessions) @defer.inlineCallbacks - def delete_e2e_room_keys(self, user_id, version, room_id, session_id): + def delete_e2e_room_keys( + self, user_id, version, room_id=room_id, session_id=session_id + ): keyvalues = { "user_id": user_id, @@ -162,8 +149,8 @@ class EndToEndRoomKeyStore(SQLBaseStore): } if room_id: keyvalues['room_id'] = room_id - if session_id: - keyvalues['session_id'] = session_id + if session_id: + keyvalues['session_id'] = session_id yield self._simple_delete( table="e2e_room_keys", diff --git a/synapse/storage/schema/delta/46/e2e_room_keys.sql b/synapse/storage/schema/delta/46/e2e_room_keys.sql index 0d2a85fbe6..16499ac34c 100644 --- a/synapse/storage/schema/delta/46/e2e_room_keys.sql +++ b/synapse/storage/schema/delta/46/e2e_room_keys.sql @@ -25,16 +25,14 @@ CREATE TABLE e2e_room_keys ( session_data TEXT NOT NULL ); -CREATE UNIQUE INDEX e2e_room_keys_user_idx ON e2e_room_keys(user_id); -CREATE UNIQUE INDEX e2e_room_keys_room_idx ON e2e_room_keys(room_id); -CREATE UNIQUE INDEX e2e_room_keys_session_idx ON e2e_room_keys(session_id); +CREATE UNIQUE INDEX e2e_room_keys_idx ON e2e_room_keys(user_id, room_id, session_id); -- the metadata for each generation of encrypted e2e session backups -CREATE TABLE e2e_room_key_versions ( +CREATE TABLE e2e_room_keys_versions ( user_id TEXT NOT NULL, version TEXT NOT NULL, algorithm TEXT NOT NULL, auth_data TEXT NOT NULL ); -CREATE UNIQUE INDEX e2e_room_key_user_idx ON e2e_room_keys(user_id); +CREATE UNIQUE INDEX e2e_room_keys_versions_user_idx ON e2e_room_keys_versions(user_id); -- cgit 1.4.1 From cac02537998718c05a561918269745161378dd6f Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Mon, 18 Dec 2017 01:58:53 +0000 Subject: rename room_key_version table correctly, and fix opt args --- synapse/handlers/e2e_room_keys.py | 6 +++--- synapse/storage/e2e_room_keys.py | 26 +++++++++++++------------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index dda31fdd24..87be081b1c 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -143,7 +143,7 @@ class E2eRoomKeysHandler(object): # lock everyone out until we've switched version with (yield self._upload_linearizer.queue(user_id)): - new_version = yield self.store.create_e2e_room_key_version( + new_version = yield self.store.create_e2e_room_keys_version( user_id, version_info ) defer.returnValue(new_version) @@ -151,7 +151,7 @@ class E2eRoomKeysHandler(object): @defer.inlineCallbacks def get_version_info(self, user_id, version): with (yield self._upload_linearizer.queue(user_id)): - results = yield self.store.get_e2e_room_key_version_info( + results = yield self.store.get_e2e_room_keys_version_info( user_id, version ) defer.returnValue(results) @@ -159,4 +159,4 @@ class E2eRoomKeysHandler(object): @defer.inlineCallbacks def delete_version(self, user_id, version): with (yield self._upload_linearizer.queue(user_id)): - yield self.store.delete_e2e_room_key_version(user_id, version) + yield self.store.delete_e2e_room_keys_version(user_id, version) diff --git 
a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index c11417c415..7e1cb13e74 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -99,7 +99,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): @defer.inlineCallbacks def get_e2e_room_keys( - self, user_id, version, room_id=room_id, session_id=session_id + self, user_id, version, room_id=None, session_id=None ): keyvalues = { @@ -140,7 +140,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): @defer.inlineCallbacks def delete_e2e_room_keys( - self, user_id, version, room_id=room_id, session_id=session_id + self, user_id, version, room_id=None, session_id=None ): keyvalues = { @@ -159,10 +159,10 @@ class EndToEndRoomKeyStore(SQLBaseStore): ) @defer.inlineCallbacks - def get_e2e_room_key_version_info(self, user_id, version): + def get_e2e_room_keys_version_info(self, user_id, version): row = yield self._simple_select_one( - table="e2e_room_key_versions", + table="e2e_room_keys_versions", keyvalues={ "user_id": user_id, "version": version, @@ -173,20 +173,20 @@ class EndToEndRoomKeyStore(SQLBaseStore): "algorithm", "auth_data", ), - desc="get_e2e_room_key_version_info", + desc="get_e2e_room_keys_version_info", ) defer.returnValue(row) - def create_e2e_room_key_version(self, user_id, info): + def create_e2e_room_keys_version(self, user_id, info): """Atomically creates a new version of this user's e2e_room_keys store with the given version info. """ - def _create_e2e_room_key_version_txn(txn): + def _create_e2e_room_keys_version_txn(txn): txn.execute( - "SELECT MAX(version) FROM e2e_room_key_versions WHERE user_id=?", + "SELECT MAX(version) FROM e2e_room_keys_versions WHERE user_id=?", (user_id,) ) current_version = txn.fetchone()[0] @@ -197,7 +197,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): self._simple_insert_txn( txn, - table="e2e_room_key_versions", + table="e2e_room_keys_versions", values={ "user_id": user_id, "version": new_version, @@ -209,11 +209,11 @@ class EndToEndRoomKeyStore(SQLBaseStore): return new_version return self.runInteraction( - "create_e2e_room_key_version_txn", _create_e2e_room_key_version_txn + "create_e2e_room_keys_version_txn", _create_e2e_room_keys_version_txn ) @defer.inlineCallbacks - def delete_e2e_room_key_version(self, user_id, version): + def delete_e2e_room_keys_version(self, user_id, version): keyvalues = { "user_id": user_id, @@ -221,7 +221,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): } yield self._simple_delete( - table="e2e_room_key_versions", + table="e2e_room_keys_versions", keyvalues=keyvalues, - desc="delete_e2e_room_key_version", + desc="delete_e2e_room_keys_version", ) -- cgit 1.4.1 From ca0b052307de8868d3e337f1ace5667dad740ab1 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 24 Dec 2017 15:03:44 +0000 Subject: fix factoring out of _should_replace_room_key --- synapse/handlers/e2e_room_keys.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 87be081b1c..b67d6a2a7e 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -100,15 +100,16 @@ class E2eRoomKeysHandler(object): else: raise e - if _should_replace_room_key(current_room_key, room_key): + if E2eRoomKeysHandler._should_replace_room_key(current_room_key, room_key): yield self.store.set_e2e_room_key( user_id, version, room_id, session_id, room_key ) + @staticmethod def _should_replace_room_key(current_room_key, room_key): """ - Determine whether to replace the 
current_room_key in our backup for this - session (if any) with a new room_key that has been uploaded. + Determine whether to replace a given current_room_key (if any) + with a newly uploaded room_key backup Args: current_room_key (dict): Optional, the current room_key dict if any -- cgit 1.4.1 From 8d14598e90396c52c9394cf807861921e053d8da Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 24 Dec 2017 16:44:18 +0000 Subject: add storage docstring; remove unused set_e2e_room_keys --- synapse/storage/e2e_room_keys.py | 119 +++++++++++++++++++++++++++------------ 1 file changed, 83 insertions(+), 36 deletions(-) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 7e1cb13e74..45d2c9b433 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -22,6 +22,22 @@ class EndToEndRoomKeyStore(SQLBaseStore): @defer.inlineCallbacks def get_e2e_room_key(self, user_id, version, room_id, session_id): + """Get the encrypted E2E room key for a given session from a given + backup version of room_keys. We only store the 'best' room key for a given + session at a given time, as determined by the handler. + + Args: + user_id(str): the user whose backup we're querying + version(str): the version ID of the backup for the set of keys we're querying + room_id(str): the ID of the room whose keys we're querying. + This is a bit redundant as it's implied by the session_id, but + we include for consistency with the rest of the API. + session_id(str): the session whose room_key we're querying. + + Returns: + A deferred dict giving the session_data and message metadata for + this room key. + """ row = yield self._simple_select_one( table="e2e_room_keys", @@ -43,6 +59,17 @@ class EndToEndRoomKeyStore(SQLBaseStore): defer.returnValue(row) def set_e2e_room_key(self, user_id, version, room_id, session_id, room_key): + """Replaces or inserts the encrypted E2E room key for a given session in + a given backup + + Args: + user_id(str): the user whose backup we're setting + version(str): the version ID of the backup we're updating + room_id(str): the ID of the room whose keys we're setting + session_id(str): the session whose room_key we're setting + Raises: + StoreError if stuff goes wrong, probably + """ yield self._simple_upsert( table="e2e_room_keys", @@ -61,46 +88,27 @@ class EndToEndRoomKeyStore(SQLBaseStore): lock=False, ) - # XXX: this isn't currently used and isn't tested anywhere - # it could be used in future for bulk-uploading new versions of room_keys - # for a user or something though. 
- def set_e2e_room_keys(self, user_id, version, room_keys): - - def _set_e2e_room_keys_txn(txn): - - values = [] - for room_id in room_keys['rooms']: - for session_id in room_keys['rooms'][room_id]['sessions']: - session = room_keys['rooms'][room_id]['sessions'][session_id] - values.append( - { - "user_id": user_id, - "room_id": room_id, - "session_id": session_id, - "version": version, - "first_message_index": session['first_message_index'], - "forwarded_count": session['forwarded_count'], - "is_verified": session['is_verified'], - "session_data": session['session_data'], - } - ) - - self._simple_insert_many_txn( - txn, - table="e2e_room_keys", - values=values - ) - - return True - - return self.runInteraction( - "set_e2e_room_keys", _set_e2e_room_keys_txn - ) - @defer.inlineCallbacks def get_e2e_room_keys( self, user_id, version, room_id=None, session_id=None ): + """Bulk get the E2E room keys for a given backup, optionally filtered to a given + room, or a given session. + + Args: + user_id(str): the user whose backup we're querying + version(str): the version ID of the backup for the set of keys we're querying + room_id(str): Optional. the ID of the room whose keys we're querying, if any. + If not specified, we return the keys for all the rooms in the backup. + session_id(str): Optional. the session whose room_key we're querying, if any. + If specified, we also require the room_id to be specified. + If not specified, we return all the keys in this version of + the backup (or for the specified room) + + Returns: + A deferred list of dicts giving the session_data and message metadata for + these room keys. + """ keyvalues = { "user_id": user_id, @@ -142,6 +150,22 @@ class EndToEndRoomKeyStore(SQLBaseStore): def delete_e2e_room_keys( self, user_id, version, room_id=None, session_id=None ): + """Bulk delete the E2E room keys for a given backup, optionally filtered to a given + room or a given session. + + Args: + user_id(str): the user whose backup we're deleting from + version(str): the version ID of the backup for the set of keys we're deleting + room_id(str): Optional. the ID of the room whose keys we're deleting, if any. + If not specified, we delete the keys for all the rooms in the backup. + session_id(str): Optional. the session whose room_key we're querying, if any. + If specified, we also require the room_id to be specified. + If not specified, we delete all the keys in this version of + the backup (or for the specified room) + + Returns: + A deferred of the deletion transaction + """ keyvalues = { "user_id": user_id, @@ -160,6 +184,15 @@ class EndToEndRoomKeyStore(SQLBaseStore): @defer.inlineCallbacks def get_e2e_room_keys_version_info(self, user_id, version): + """Get info etadata about a given version of our room_keys backup + + Args: + user_id(str): the user whose backup we're querying + version(str): the version ID of the backup we're querying about + + Returns: + A deferred dict giving the info metadata for this backup version + """ row = yield self._simple_select_one( table="e2e_room_keys_versions", @@ -181,6 +214,13 @@ class EndToEndRoomKeyStore(SQLBaseStore): def create_e2e_room_keys_version(self, user_id, info): """Atomically creates a new version of this user's e2e_room_keys store with the given version info. 
+ + Args: + user_id(str): the user whose backup we're creating a version + info(dict): the info about the backup version to be created + + Returns: + A deferred string for the newly created version ID """ def _create_e2e_room_keys_version_txn(txn): @@ -214,6 +254,13 @@ class EndToEndRoomKeyStore(SQLBaseStore): @defer.inlineCallbacks def delete_e2e_room_keys_version(self, user_id, version): + """Delete a given backup version of the user's room keys. + Doesn't delete their actual key data. + + Args: + user_id(str): the user whose backup version we're deleting + version(str): the ID of the backup version we're deleting + """ keyvalues = { "user_id": user_id, -- cgit 1.4.1 From 9f500cb39efa608c02f212711a5ad2177757bf4b Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 24 Dec 2017 17:42:17 +0000 Subject: more docstring for the e2e_room_keys rest --- synapse/handlers/e2e_room_keys.py | 2 -- synapse/rest/client/v2_alpha/room_keys.py | 51 +++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index b67d6a2a7e..7a940d1c21 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -59,7 +59,6 @@ class E2eRoomKeysHandler(object): @defer.inlineCallbacks def upload_room_keys(self, user_id, version, room_keys): - # TODO: Validate the JSON to make sure it has the right keys. # XXX: perhaps we should use a finer grained lock here? @@ -139,7 +138,6 @@ class E2eRoomKeysHandler(object): @defer.inlineCallbacks def create_version(self, user_id, version_info): - # TODO: Validate the JSON to make sure it has the right keys. # lock everyone out until we've switched version diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 70b7b4573f..04547c7d43 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -208,6 +208,10 @@ class RoomKeysServlet(RestServlet): """ Deletes one or more encrypted E2E room keys for a user for backup purposes. + DELETE /room_keys/keys/!abc:matrix.org/c0ff33?version=1 + HTTP/1.1 200 OK + {} + room_id: the ID of the room whose keys to delete (optional) session_id: the ID for the E2E session to delete (optional) version: the version of the user's backup which this data is for. @@ -240,6 +244,33 @@ class RoomKeysVersionServlet(RestServlet): @defer.inlineCallbacks def on_POST(self, request, version): + """ + Create a new backup version for this user's room_keys with the given + info. The version is allocated by the server and returned to the user + in the response. This API is intended to be used whenever the user + changes the encryption key for their backups, ensuring that backups + encrypted with different keys don't collide. + + The algorithm passed in the version info is a reverse-DNS namespaced + identifier to describe the format of the encrypted backupped keys. + + The auth_data is { user_id: "user_id", nonce: } + encrypted using the algorithm and current encryption key described above. 
+ + POST /room_keys/version + Content-Type: application/json + { + "algorithm": "m.megolm_backup.v1", + "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K" + } + + HTTP/1.1 200 OK + Content-Type: application/json + { + "version": 12345 + } + """ + if version: raise SynapseError(405, "Cannot POST to a specific version") @@ -257,6 +288,17 @@ class RoomKeysVersionServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request, version): + """ + Retrieve the version information about a given version of the user's + room_keys backup. + + GET /room_keys/version/12345 HTTP/1.1 + { + "algorithm": "m.megolm_backup.v1", + "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K" + } + """ + requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() @@ -267,6 +309,15 @@ class RoomKeysVersionServlet(RestServlet): @defer.inlineCallbacks def on_DELETE(self, request, version): + """ + Delete the information about a given version of the user's + room_keys backup. Doesn't delete the actual room data. + + DELETE /room_keys/version/12345 HTTP/1.1 + HTTP/1.1 200 OK + {} + """ + requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() -- cgit 1.4.1 From 9f0791b7bd030a70182cd9b33bbe2f78dba705dd Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 27 Dec 2017 23:35:10 +0000 Subject: add a tonne of docstring; make upload_room_keys properly assert version --- synapse/storage/e2e_room_keys.py | 51 ++++++++++++++++++++++++---------------- 1 file changed, 31 insertions(+), 20 deletions(-) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 45d2c9b433..e04e6a3690 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -67,6 +67,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): version(str): the version ID of the backup we're updating room_id(str): the ID of the room whose keys we're setting session_id(str): the session whose room_key we're setting + room_key(dict): the room_key being set Raises: StoreError if stuff goes wrong, probably """ @@ -182,35 +183,46 @@ class EndToEndRoomKeyStore(SQLBaseStore): desc="delete_e2e_room_keys", ) - @defer.inlineCallbacks - def get_e2e_room_keys_version_info(self, user_id, version): - """Get info etadata about a given version of our room_keys backup + def get_e2e_room_keys_version_info(self, user_id, version=None): + """Get info metadata about a version of our room_keys backup. Args: user_id(str): the user whose backup we're querying - version(str): the version ID of the backup we're querying about - + version(str): Optional. the version ID of the backup we're querying about + If missing, we return the information about the current version. 
+ Raises: + StoreError: with code 404 if there are no e2e_room_keys_versions present Returns: A deferred dict giving the info metadata for this backup version """ - row = yield self._simple_select_one( - table="e2e_room_keys_versions", - keyvalues={ - "user_id": user_id, - "version": version, - }, - retcols=( - "user_id", - "version", - "algorithm", - "auth_data", - ), + def _get_e2e_room_keys_version_info_txn(txn): + if version is None: + txn.execute( + "SELECT MAX(version) FROM e2e_room_keys_versions WHERE user_id=?", + (user_id,) + ) + version = txn.fetchone()[0] + + return self._simple_select_one_txn( + table="e2e_room_keys_versions", + keyvalues={ + "user_id": user_id, + "version": version, + }, + retcols=( + "user_id", + "version", + "algorithm", + "auth_data", + ), + ) + + return self.runInteraction( desc="get_e2e_room_keys_version_info", + _get_e2e_room_keys_version_info_txn ) - defer.returnValue(row) - def create_e2e_room_keys_version(self, user_id, info): """Atomically creates a new version of this user's e2e_room_keys store with the given version info. @@ -224,7 +236,6 @@ class EndToEndRoomKeyStore(SQLBaseStore): """ def _create_e2e_room_keys_version_txn(txn): - txn.execute( "SELECT MAX(version) FROM e2e_room_keys_versions WHERE user_id=?", (user_id,) -- cgit 1.4.1 From 14b3da63a339333292a83410c0ba3148bcb644ba Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 27 Dec 2017 23:37:44 +0000 Subject: add a tonne of docstring; make upload_room_keys properly assert version --- synapse/handlers/e2e_room_keys.py | 111 +++++++++++++++++++++++++++--- synapse/rest/client/v2_alpha/room_keys.py | 11 ++- 2 files changed, 113 insertions(+), 9 deletions(-) diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 7a940d1c21..2fa025bfc7 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -42,7 +42,16 @@ class E2eRoomKeysHandler(object): self._upload_linearizer = Linearizer("upload_room_keys_lock") @defer.inlineCallbacks - def get_room_keys(self, user_id, version, room_id, session_id): + def get_room_keys(self, user_id, version, room_id=None, session_id=None): + """Bulk get the E2E room keys for a given backup, optionally filtered to a given + room, or a given session. + See EndToEndRoomKeyStore.get_e2e_room_keys for full details. + + Returns: + A deferred list of dicts giving the session_data and message metadata for + these room keys. + """ + # we deliberately take the lock to get keys so that changing the version # works atomically with (yield self._upload_linearizer.queue(user_id)): @@ -52,20 +61,56 @@ class E2eRoomKeysHandler(object): defer.returnValue(results) @defer.inlineCallbacks - def delete_room_keys(self, user_id, version, room_id, session_id): + def delete_room_keys(self, user_id, version, room_id=None, session_id=None): + """Bulk delete the E2E room keys for a given backup, optionally filtered to a given + room or a given session. + See EndToEndRoomKeyStore.delete_e2e_room_keys for full details. + + Returns: + A deferred of the deletion transaction + """ + # lock for consistency with uploading with (yield self._upload_linearizer.queue(user_id)): yield self.store.delete_e2e_room_keys(user_id, version, room_id, session_id) @defer.inlineCallbacks def upload_room_keys(self, user_id, version, room_keys): + """Bulk upload a list of room keys into a given backup version, asserting + that the given version is the current backup version. 
room_keys are merged + into the current backup as described in RoomKeysServlet.on_PUT(). + + Args: + user_id(str): the user whose backup we're setting + version(str): the version ID of the backup we're updating + room_keys(dict): a nested dict describing the room_keys we're setting: + + { + "rooms": { + "!abc:matrix.org": { + "sessions": { + "c0ff33": { + "first_message_index": 1, + "forwarded_count": 1, + "is_verified": false, + "session_data": "SSBBTSBBIEZJU0gK" + } + } + } + } + } + + Raises: + SynapseError: with code 404 if there are no versions defined + RoomKeysVersionError: if the uploaded version is not the current version + """ + # TODO: Validate the JSON to make sure it has the right keys. # XXX: perhaps we should use a finer grained lock here? with (yield self._upload_linearizer.queue(user_id)): # Check that the version we're trying to upload is the current version - try: - version_info = yield self.get_version_info(user_id, version) + version_info = yield self.get_current_version_info(user_id) except StoreError as e: if e.code == 404: raise SynapseError(404, "Version '%s' not found" % (version,)) @@ -87,6 +132,17 @@ class E2eRoomKeysHandler(object): @defer.inlineCallbacks def _upload_room_key(self, user_id, version, room_id, session_id, room_key): + """Upload a given room_key for a given room and session into a given + version of the backup. Merges the key with any which might already exist. + + Args: + user_id(str): the user whose backup we're setting + version(str): the version ID of the backup we're updating + room_id(str): the ID of the room whose keys we're setting + session_id(str): the session whose room_key we're setting + room_key(dict): the room_key being set + """ + # get the room_key for this particular row current_room_key = None try: @@ -138,6 +194,23 @@ class E2eRoomKeysHandler(object): @defer.inlineCallbacks def create_version(self, user_id, version_info): + """Create a new backup version. This automatically becomes the new + backup version for the user's keys; previous backups will no longer be + writeable to. + + Args: + user_id(str): the user whose backup version we're creating + version_info(dict): metadata about the new version being created + + { + "algorithm": "m.megolm_backup.v1", + "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K" + } + + Returns: + A deferred of a string that gives the new version number. + """ + # TODO: Validate the JSON to make sure it has the right keys. # lock everyone out until we've switched version @@ -148,14 +221,36 @@ class E2eRoomKeysHandler(object): defer.returnValue(new_version) @defer.inlineCallbacks - def get_version_info(self, user_id, version): + def get_current_version_info(self, user_id): + """Get the user's current backup version. + + Args: + user_id(str): the user whose current backup version we're querying + Raises: + StoreError: code 404 if there is no current backup version + Returns: + A deferred of a info dict that gives the info about the new version. 
+ + { + "algorithm": "m.megolm_backup.v1", + "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K" + } + """ + with (yield self._upload_linearizer.queue(user_id)): - results = yield self.store.get_e2e_room_keys_version_info( - user_id, version - ) + results = yield self.store.get_e2e_room_keys_version_info(user_id) defer.returnValue(results) @defer.inlineCallbacks def delete_version(self, user_id, version): + """Deletes a given version of the user's e2e_room_keys backup + + Args: + user_id(str): the user whose current backup version we're deleting + version(str): the version id of the backup being deleted + Raises: + StoreError: code 404 if this backup version doesn't exist + """ + with (yield self._upload_linearizer.queue(user_id)): yield self.store.delete_e2e_room_keys_version(user_id, version) diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 04547c7d43..d3f857aba2 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -47,7 +47,7 @@ class RoomKeysServlet(RestServlet): room_id: the ID of the room the keys are for (optional) session_id: the ID for the E2E room keys for the room (optional) version: the version of the user's backup which this data is for. - the version must already have been created via the /change_secret API. + the version must already have been created via the /room_keys/version API. Each session has: * first_message_index: a numeric index indicating the oldest message @@ -59,6 +59,9 @@ class RoomKeysServlet(RestServlet): * session_data: base64-encrypted data describing the session. Returns 200 OK on success with body {} + Returns 403 Forbidden if the version in question is not the most recently + created version (i.e. if this is an old client trying to write to a stale backup) + Returns 404 Not Found if the version in question doesn't exist The API is designed to be otherwise agnostic to the room_key encryption algorithm being used. Sessions are merged with existing ones in the @@ -251,6 +254,9 @@ class RoomKeysVersionServlet(RestServlet): changes the encryption key for their backups, ensuring that backups encrypted with different keys don't collide. + It takes out an exclusive lock on this user's room_key backups, to ensure + clients only upload to the current backup. + The algorithm passed in the version info is a reverse-DNS namespaced identifier to describe the format of the encrypted backupped keys. @@ -292,6 +298,9 @@ class RoomKeysVersionServlet(RestServlet): Retrieve the version information about a given version of the user's room_keys backup. + It takes out an exclusive lock on this user's room_key backups, to ensure + clients only upload to the current backup. + GET /room_keys/version/12345 HTTP/1.1 { "algorithm": "m.megolm_backup.v1", -- cgit 1.4.1 From 234611f3472b229d9e3a9ec7a27c51446f6a61ba Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 27 Dec 2017 23:42:08 +0000 Subject: fix typos --- synapse/handlers/e2e_room_keys.py | 3 ++- synapse/storage/e2e_room_keys.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 2fa025bfc7..6446c3c6c3 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -110,7 +110,8 @@ class E2eRoomKeysHandler(object): # XXX: perhaps we should use a finer grained lock here? 
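        # e.g. keying the linearizer on (user_id, room_id) rather than just
        # user_id, so that uploads to different rooms need not serialise
        # against each other. A hypothetical sketch (the version check would
        # still want the per-user lock):
        #
        #     with (yield self._upload_linearizer.queue((user_id, room_id))):
        #         ...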
with (yield self._upload_linearizer.queue(user_id)): # Check that the version we're trying to upload is the current version - version_info = yield self.get_current_version_info(user_id) + try: + version_info = yield self.get_current_version_info(user_id) except StoreError as e: if e.code == 404: raise SynapseError(404, "Version '%s' not found" % (version,)) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index e04e6a3690..b51faa1204 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -219,7 +219,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): ) return self.runInteraction( - desc="get_e2e_room_keys_version_info", + "get_e2e_room_keys_version_info", _get_e2e_room_keys_version_info_txn ) -- cgit 1.4.1 From 982edca38026e2bb9085e77b4c25af60c4793f34 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 27 Dec 2017 23:58:51 +0000 Subject: fix flakes --- synapse/handlers/e2e_room_keys.py | 4 ++-- synapse/storage/e2e_room_keys.py | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 6446c3c6c3..fdae69c600 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -136,7 +136,7 @@ class E2eRoomKeysHandler(object): """Upload a given room_key for a given room and session into a given version of the backup. Merges the key with any which might already exist. - Args: + Args: user_id(str): the user whose backup we're setting version(str): the version ID of the backup we're updating room_id(str): the ID of the room whose keys we're setting @@ -199,7 +199,7 @@ class E2eRoomKeysHandler(object): backup version for the user's keys; previous backups will no longer be writeable to. 
- Args: + Args: user_id(str): the user whose backup version we're creating version_info(dict): metadata about the new version being created diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index b51faa1204..7ab75070a2 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -202,13 +202,15 @@ class EndToEndRoomKeyStore(SQLBaseStore): "SELECT MAX(version) FROM e2e_room_keys_versions WHERE user_id=?", (user_id,) ) - version = txn.fetchone()[0] + this_version = txn.fetchone()[0] + else: + this_version = version return self._simple_select_one_txn( table="e2e_room_keys_versions", keyvalues={ "user_id": user_id, - "version": version, + "version": this_version, }, retcols=( "user_id", "version", "algorithm", "auth_data", ), -- cgit 1.4.1 From 5e42c45c96bb62a86604118250e9cb6c57b94254 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 31 Dec 2017 14:10:31 +0000 Subject: switch get_current_version_info back to being get_version_info --- synapse/handlers/e2e_room_keys.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index fdae69c600..f08d80da3e 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -111,7 +111,7 @@ class E2eRoomKeysHandler(object): with (yield self._upload_linearizer.queue(user_id)): # Check that the version we're trying to upload is the current version try: - version_info = yield self.get_current_version_info(user_id) + version_info = yield self.get_version_info(user_id) except StoreError as e: if e.code == 404: raise SynapseError(404, "Version '%s' not found" % (version,)) @@ -222,17 +222,20 @@ class E2eRoomKeysHandler(object): defer.returnValue(new_version) @defer.inlineCallbacks - def get_current_version_info(self, user_id): - """Get the user's current backup version. + def get_version_info(self, user_id, version=None): + """Get the info about a given version of the user's backup Args: user_id(str): the user whose current backup version we're querying + version(str): Optional; if None gives the most recent version + otherwise a historical one. Raises: - StoreError: code 404 if there is no current backup version + StoreError: code 404 if the requested backup version doesn't exist Returns: A deferred of a info dict that gives the info about the new version. { + "version": "1234", "algorithm": "m.megolm_backup.v1", "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K" } -- cgit 1.4.1 From 93d174bcc4cf773bea2534d3683a1e91e5489c89 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 31 Dec 2017 14:10:49 +0000 Subject: improve docstring --- synapse/rest/client/v2_alpha/room_keys.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index d3f857aba2..ca69ced1e3 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -296,13 +296,17 @@ class RoomKeysVersionServlet(RestServlet): def on_GET(self, request, version): """ Retrieve the version information about a given version of the user's - room_keys backup. + room_keys backup. If the version part is missing, returns info about the + most current backup version (if any) It takes out an exclusive lock on this user's room_key backups, to ensure clients only upload to the current backup. + Returns 404 if the given version does not exist.
+ GET /room_keys/version/12345 HTTP/1.1 { + "version": "12345", "algorithm": "m.megolm_backup.v1", "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K" } -- cgit 1.4.1 From b5eee511c73fdd9b5d1a7453433513791192a250 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 31 Dec 2017 14:11:02 +0000 Subject: don't needlessly return user_id --- synapse/storage/e2e_room_keys.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 7ab75070a2..3c720f3b3e 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -213,7 +213,6 @@ class EndToEndRoomKeyStore(SQLBaseStore): "version": this_version, }, retcols=( - "user_id", "version", "algorithm", "auth_data", -- cgit 1.4.1 From 174be586e5fca46013a4a8f03b4337f46b2502aa Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 31 Dec 2017 14:11:15 +0000 Subject: first cut at a UT --- tests/handlers/test_e2e_room_keys.py | 141 +++++++++++++++++++++++++++++++++++ 1 file changed, 141 insertions(+) create mode 100644 tests/handlers/test_e2e_room_keys.py diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py new file mode 100644 index 0000000000..9d3bef6db2 --- /dev/null +++ b/tests/handlers/test_e2e_room_keys.py @@ -0,0 +1,141 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock +from synapse.api import errors +from twisted.internet import defer + +import synapse.api.errors +import synapse.handlers.e2e_room_keys + +import synapse.storage +from tests import unittest, utils + + +class E2eRoomKeysHandlerTestCase(unittest.TestCase): + def __init__(self, *args, **kwargs): + super(E2eRoomKeysHandlerTestCase, self).__init__(*args, **kwargs) + self.hs = None # type: synapse.server.HomeServer + self.handler = None # type: synapse.handlers.e2e_keys.E2eRoomKeysHandler + + @defer.inlineCallbacks + def setUp(self): + self.hs = yield utils.setup_test_homeserver( + handlers=None, + replication_layer=mock.Mock(), + ) + self.handler = synapse.handlers.e2e_keys.E2eRoomKeysHandler(self.hs) + + + @defer.inlineCallbacks + def test_get_missing_current_version_info(self): + """Check that we get a 404 if we ask for info about the current version + if there is no version. + """ + local_user = "@boris:" + self.hs.hostname + try: + res = yield self.handler.get_version_info(local_user); + except errors.SynapseError as e: + self.assertEqual(e.code, 404); + self.assertEqual(res, None); + + @defer.inlineCallbacks + def test_get_missing_version_info(self): + """Check that we get a 404 if we ask for info about a specific version + if it doesn't exist. 
+ """ + local_user = "@boris:" + self.hs.hostname + try: + res = yield self.handler.get_version_info(local_user, "mrflibble"); + except errors.SynapseError as e: + self.assertEqual(e.code, 404); + self.assertEqual(res, None); + + @defer.inlineCallbacks + def test_create_version(self): + """Check that we can create and then retrieve versions. + """ + local_user = "@boris:" + self.hs.hostname + res = yield self.handler.create_version(user_id, { + "algorithm": "m.megolm_backup.v1", + "auth_data": "first_version_auth_data", + }); + self.assertEqual(res, "1"); + + # check we can retrieve it as the current version + res = yield self.handler.get_version_info(local_user); + self.assertDictEqual(res, { + "version": "1", + "algorithm": "m.megolm_backup.v1", + "auth_data": "first_version_auth_data", + }); + + # check we can retrieve it as a specific version + res = yield self.handler.get_version_info(local_user, "1"); + self.assertDictEqual(res, { + "version": "1", + "algorithm": "m.megolm_backup.v1", + "auth_data": "first_version_auth_data", + }); + + # upload a new one... + res = yield self.handler.create_version(user_id, { + "algorithm": "m.megolm_backup.v1", + "auth_data": "second_version_auth_data", + }); + self.assertEqual(res, "2"); + + # check we can retrieve it as the current version + res = yield self.handler.get_version_info(local_user); + self.assertDictEqual(res, { + "version": "2", + "algorithm": "m.megolm_backup.v1", + "auth_data": "second_version_auth_data", + }); + + @defer.inlineCallbacks + def test_delete_version(self): + """Check that we can create and then delete versions. + """ + local_user = "@boris:" + self.hs.hostname + res = yield self.handler.create_version(user_id, { + "algorithm": "m.megolm_backup.v1", + "auth_data": "first_version_auth_data", + }); + self.assertEqual(res, "1"); + + # check we can delete it + yield self.handler.delete_version(local_user, "1"); + + # check that it's gone + try: + res = yield self.handler.get_version_info(local_user, "1"); + except errors.SynapseError as e: + self.assertEqual(e.code, 404); + self.assertEqual(res, None); + + + @defer.inlineCallbacks + def test_get_room_keys(self): + pass + + @defer.inlineCallbacks + def test_upload_room_keys(self): + pass + + @defer.inlineCallbacks + def test_delete_room_keys(self): + pass -- cgit 1.4.1 From 15d513f16fd272fb3763a42f05a8f296d4e1abf0 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 31 Dec 2017 14:35:25 +0000 Subject: fix idiocies and so make tests pass --- synapse/storage/e2e_room_keys.py | 5 +++-- synapse/storage/schema/delta/46/e2e_room_keys.sql | 2 +- tests/handlers/test_e2e_room_keys.py | 19 +++++++++++-------- 3 files changed, 15 insertions(+), 11 deletions(-) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 3c720f3b3e..e4d56b7c37 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -207,6 +207,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): this_version = version return self._simple_select_one_txn( + txn, table="e2e_room_keys_versions", keyvalues={ "user_id": user_id, @@ -243,9 +244,9 @@ class EndToEndRoomKeyStore(SQLBaseStore): ) current_version = txn.fetchone()[0] if current_version is None: - current_version = 0 + current_version = '0' - new_version = current_version + 1 + new_version = str(int(current_version) + 1) self._simple_insert_txn( txn, diff --git a/synapse/storage/schema/delta/46/e2e_room_keys.sql b/synapse/storage/schema/delta/46/e2e_room_keys.sql index 16499ac34c..4531fd56ee 100644 --- 
a/synapse/storage/schema/delta/46/e2e_room_keys.sql +++ b/synapse/storage/schema/delta/46/e2e_room_keys.sql @@ -35,4 +35,4 @@ CREATE TABLE e2e_room_keys_versions ( auth_data TEXT NOT NULL ); -CREATE UNIQUE INDEX e2e_room_keys_versions_user_idx ON e2e_room_keys_versions(user_id); +CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions(user_id, version); diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index 9d3bef6db2..6f4b3a147a 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -37,7 +37,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): handlers=None, replication_layer=mock.Mock(), ) - self.handler = synapse.handlers.e2e_keys.E2eRoomKeysHandler(self.hs) + self.handler = synapse.handlers.e2e_room_keys.E2eRoomKeysHandler(self.hs) @defer.inlineCallbacks @@ -46,6 +46,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): if there is no version. """ local_user = "@boris:" + self.hs.hostname + res = None try: res = yield self.handler.get_version_info(local_user); except errors.SynapseError as e: @@ -58,6 +59,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): if it doesn't exist. """ local_user = "@boris:" + self.hs.hostname + res = None try: res = yield self.handler.get_version_info(local_user, "mrflibble"); except errors.SynapseError as e: @@ -69,7 +71,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): """Check that we can create and then retrieve versions. """ local_user = "@boris:" + self.hs.hostname - res = yield self.handler.create_version(user_id, { + res = yield self.handler.create_version(local_user, { "algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data", }); @@ -92,7 +94,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): }); # upload a new one... - res = yield self.handler.create_version(user_id, { + res = yield self.handler.create_version(local_user, { "algorithm": "m.megolm_backup.v1", "auth_data": "second_version_auth_data", }); @@ -111,7 +113,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): """Check that we can create and then delete versions. 
""" local_user = "@boris:" + self.hs.hostname - res = yield self.handler.create_version(user_id, { + res = yield self.handler.create_version(local_user, { "algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data", }); @@ -121,21 +123,22 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): yield self.handler.delete_version(local_user, "1"); # check that it's gone + res = None try: res = yield self.handler.get_version_info(local_user, "1"); except errors.SynapseError as e: self.assertEqual(e.code, 404); - self.assertEqual(res, None); + self.assertEqual(res, None); @defer.inlineCallbacks def test_get_room_keys(self): - pass + yield None @defer.inlineCallbacks def test_upload_room_keys(self): - pass + yield None @defer.inlineCallbacks def test_delete_room_keys(self): - pass + yield None -- cgit 1.4.1 From f6a3067868e7da2222484feb9aa421b1dcfecd0a Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 31 Dec 2017 14:42:10 +0000 Subject: linting --- tests/handlers/test_e2e_room_keys.py | 48 +++++++++++++++++------------------- 1 file changed, 23 insertions(+), 25 deletions(-) diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index 6f4b3a147a..afe6ecf27b 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -39,7 +39,6 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): ) self.handler = synapse.handlers.e2e_room_keys.E2eRoomKeysHandler(self.hs) - @defer.inlineCallbacks def test_get_missing_current_version_info(self): """Check that we get a 404 if we ask for info about the current version @@ -48,10 +47,10 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): local_user = "@boris:" + self.hs.hostname res = None try: - res = yield self.handler.get_version_info(local_user); + res = yield self.handler.get_version_info(local_user) except errors.SynapseError as e: - self.assertEqual(e.code, 404); - self.assertEqual(res, None); + self.assertEqual(e.code, 404) + self.assertEqual(res, None) @defer.inlineCallbacks def test_get_missing_version_info(self): @@ -61,10 +60,10 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): local_user = "@boris:" + self.hs.hostname res = None try: - res = yield self.handler.get_version_info(local_user, "mrflibble"); + res = yield self.handler.get_version_info(local_user, "mrflibble") except errors.SynapseError as e: - self.assertEqual(e.code, 404); - self.assertEqual(res, None); + self.assertEqual(e.code, 404) + self.assertEqual(res, None) @defer.inlineCallbacks def test_create_version(self): @@ -74,39 +73,39 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): res = yield self.handler.create_version(local_user, { "algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data", - }); - self.assertEqual(res, "1"); + }) + self.assertEqual(res, "1") # check we can retrieve it as the current version - res = yield self.handler.get_version_info(local_user); + res = yield self.handler.get_version_info(local_user) self.assertDictEqual(res, { "version": "1", "algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data", - }); + }) # check we can retrieve it as a specific version - res = yield self.handler.get_version_info(local_user, "1"); + res = yield self.handler.get_version_info(local_user, "1") self.assertDictEqual(res, { "version": "1", "algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data", - }); + }) # upload a new one... 
res = yield self.handler.create_version(local_user, { "algorithm": "m.megolm_backup.v1", "auth_data": "second_version_auth_data", - }); - self.assertEqual(res, "2"); + }) + self.assertEqual(res, "2") # check we can retrieve it as the current version - res = yield self.handler.get_version_info(local_user); + res = yield self.handler.get_version_info(local_user) self.assertDictEqual(res, { "version": "2", "algorithm": "m.megolm_backup.v1", "auth_data": "second_version_auth_data", - }); + }) @defer.inlineCallbacks def test_delete_version(self): @@ -116,20 +115,19 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): res = yield self.handler.create_version(local_user, { "algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data", - }); - self.assertEqual(res, "1"); + }) + self.assertEqual(res, "1") # check we can delete it - yield self.handler.delete_version(local_user, "1"); + yield self.handler.delete_version(local_user, "1") # check that it's gone - res = None + res = None try: - res = yield self.handler.get_version_info(local_user, "1"); + res = yield self.handler.get_version_info(local_user, "1") except errors.SynapseError as e: - self.assertEqual(e.code, 404); - self.assertEqual(res, None); - + self.assertEqual(e.code, 404) + self.assertEqual(res, None) @defer.inlineCallbacks def test_get_room_keys(self): -- cgit 1.4.1 From fe87890b18f57f0268bd65aeca881e7817bbe9e4 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 31 Dec 2017 17:47:11 +0000 Subject: implement remaining tests and make them work --- synapse/handlers/e2e_room_keys.py | 35 +++- synapse/rest/client/v2_alpha/room_keys.py | 6 + synapse/storage/e2e_room_keys.py | 3 +- tests/handlers/test_e2e_room_keys.py | 276 +++++++++++++++++++++++++++--- 4 files changed, 287 insertions(+), 33 deletions(-) diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index f08d80da3e..09c2888db6 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -58,6 +58,10 @@ class E2eRoomKeysHandler(object): results = yield self.store.get_e2e_room_keys( user_id, version, room_id, session_id ) + + if results['rooms'] == {}: + raise SynapseError(404, "No room_keys found") + defer.returnValue(results) @defer.inlineCallbacks @@ -109,9 +113,10 @@ class E2eRoomKeysHandler(object): # XXX: perhaps we should use a finer grained lock here? with (yield self._upload_linearizer.queue(user_id)): + # Check that the version we're trying to upload is the current version try: - version_info = yield self.get_version_info(user_id) + version_info = yield self._get_version_info_unlocked(user_id) except StoreError as e: if e.code == 404: raise SynapseError(404, "Version '%s' not found" % (version,)) @@ -119,16 +124,23 @@ class E2eRoomKeysHandler(object): raise e if version_info['version'] != version: - raise RoomKeysVersionError(current_version=version_info.version) + # Check that the version we're trying to upload actually exists + try: + version_info = yield self._get_version_info_unlocked(user_id, version) + # if we get this far, the version must exist + raise RoomKeysVersionError(current_version=version_info['version']) + except StoreError as e: + if e.code == 404: + raise SynapseError(404, "Version '%s' not found" % (version,)) + else: + raise e # go through the room_keys. # XXX: this should/could be done concurrently, given we're in a lock. 
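            # e.g. something like the following, assuming _upload_room_key is
            # safe to run in parallel for distinct sessions and preserve_fn is
            # imported from synapse.util.logcontext (a hypothetical sketch):
            #
            #     yield defer.gatherResults([
            #         preserve_fn(self._upload_room_key)(
            #             user_id, version, room_id, session_id, session
            #         )
            #         for room_id, room in room_keys['rooms'].iteritems()
            #         for session_id, session in room['sessions'].iteritems()
            #     ], consumeErrors=True)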
for room_id, room in room_keys['rooms'].iteritems(): for session_id, session in room['sessions'].iteritems(): - room_key = session[session_id] - yield self._upload_room_key( - user_id, version, room_id, session_id, room_key + user_id, version, room_id, session_id, session ) @defer.inlineCallbacks @@ -242,8 +254,17 @@ class E2eRoomKeysHandler(object): """ with (yield self._upload_linearizer.queue(user_id)): - results = yield self.store.get_e2e_room_keys_version_info(user_id) - defer.returnValue(results) + res = yield self._get_version_info_unlocked(user_id, version) + defer.returnValue(res) + + @defer.inlineCallbacks + def _get_version_info_unlocked(self, user_id, version=None): + """Get the info about a given version of the user's backup + without obtaining the upload_linearizer lock. For params see get_version_info + """ + + results = yield self.store.get_e2e_room_keys_version_info(user_id, version) + defer.returnValue(results) @defer.inlineCallbacks def delete_version(self, user_id, version): diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index ca69ced1e3..8f10e4e1cd 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -204,6 +204,12 @@ class RoomKeysServlet(RestServlet): room_keys = yield self.e2e_room_keys_handler.get_room_keys( user_id, version, room_id, session_id ) + + if session_id: + room_keys = room_keys['rooms'][room_id]['sessions'][session_id] + elif room_id: + room_keys = room_keys['rooms'][room_id] + defer.returnValue((200, room_keys)) @defer.inlineCallbacks diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index e4d56b7c37..8e8e4e457c 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -58,6 +58,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): defer.returnValue(row) + @defer.inlineCallbacks def set_e2e_room_key(self, user_id, version, room_id, session_id, room_key): """Replaces or inserts the encrypted E2E room key for a given session in a given backup @@ -135,7 +136,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): desc="get_e2e_room_keys", ) - sessions = {} + sessions = { 'rooms': {} } for row in rows: room_entry = sessions['rooms'].setdefault(row['room_id'], {"sessions": {}}) room_entry['sessions'][row['session_id']] = { diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index afe6ecf27b..3cbfd6f9d0 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -17,6 +17,7 @@ import mock from synapse.api import errors from twisted.internet import defer +import copy import synapse.api.errors import synapse.handlers.e2e_room_keys @@ -25,6 +26,22 @@ import synapse.storage from tests import unittest, utils +# sample room_key data for use in the tests +room_keys = { + "rooms": { + "!abc:matrix.org": { + "sessions": { + "c0ff33": { + "first_message_index": 1, + "forwarded_count": 1, + "is_verified": False, + "session_data": "SSBBTSBBIEZJU0gK" + } + } + } + } +} + class E2eRoomKeysHandlerTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): super(E2eRoomKeysHandlerTestCase, self).__init__(*args, **kwargs) @@ -38,46 +55,44 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): replication_layer=mock.Mock(), ) self.handler = synapse.handlers.e2e_room_keys.E2eRoomKeysHandler(self.hs) + self.local_user = "@boris:" + self.hs.hostname; @defer.inlineCallbacks def test_get_missing_current_version_info(self): """Check that we get a 404 if we 
ask for info about the current version if there is no version. """ - local_user = "@boris:" + self.hs.hostname res = None try: - res = yield self.handler.get_version_info(local_user) + yield self.handler.get_version_info(self.local_user) except errors.SynapseError as e: - self.assertEqual(e.code, 404) - self.assertEqual(res, None) + res = e.code + self.assertEqual(res, 404) @defer.inlineCallbacks def test_get_missing_version_info(self): """Check that we get a 404 if we ask for info about a specific version if it doesn't exist. """ - local_user = "@boris:" + self.hs.hostname res = None try: - res = yield self.handler.get_version_info(local_user, "mrflibble") + yield self.handler.get_version_info(self.local_user, "bogus_version") except errors.SynapseError as e: - self.assertEqual(e.code, 404) - self.assertEqual(res, None) + res = e.code + self.assertEqual(res, 404) @defer.inlineCallbacks def test_create_version(self): """Check that we can create and then retrieve versions. """ - local_user = "@boris:" + self.hs.hostname - res = yield self.handler.create_version(local_user, { + res = yield self.handler.create_version(self.local_user, { "algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data", }) self.assertEqual(res, "1") # check we can retrieve it as the current version - res = yield self.handler.get_version_info(local_user) + res = yield self.handler.get_version_info(self.local_user) self.assertDictEqual(res, { "version": "1", "algorithm": "m.megolm_backup.v1", @@ -85,7 +100,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): }) # check we can retrieve it as a specific version - res = yield self.handler.get_version_info(local_user, "1") + res = yield self.handler.get_version_info(self.local_user, "1") self.assertDictEqual(res, { "version": "1", "algorithm": "m.megolm_backup.v1", @@ -93,14 +108,14 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): }) # upload a new one... - res = yield self.handler.create_version(local_user, { + res = yield self.handler.create_version(self.local_user, { "algorithm": "m.megolm_backup.v1", "auth_data": "second_version_auth_data", }) self.assertEqual(res, "2") # check we can retrieve it as the current version - res = yield self.handler.get_version_info(local_user) + res = yield self.handler.get_version_info(self.local_user) self.assertDictEqual(res, { "version": "2", "algorithm": "m.megolm_backup.v1", @@ -111,32 +126,243 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): def test_delete_version(self): """Check that we can create and then delete versions. 
""" - local_user = "@boris:" + self.hs.hostname - res = yield self.handler.create_version(local_user, { + res = yield self.handler.create_version(self.local_user, { "algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data", }) self.assertEqual(res, "1") # check we can delete it - yield self.handler.delete_version(local_user, "1") + yield self.handler.delete_version(self.local_user, "1") # check that it's gone res = None try: - res = yield self.handler.get_version_info(local_user, "1") + yield self.handler.get_version_info(self.local_user, "1") except errors.SynapseError as e: - self.assertEqual(e.code, 404) - self.assertEqual(res, None) + res = e.code + self.assertEqual(res, 404) @defer.inlineCallbacks - def test_get_room_keys(self): - yield None + def test_get_missing_room_keys(self): + """Check that we get a 404 on querying missing room_keys + """ + res = None + try: + yield self.handler.get_room_keys(self.local_user, "bogus_version") + except errors.SynapseError as e: + res = e.code + self.assertEqual(res, 404) + + # check we also get a 404 even if the version is valid + version = yield self.handler.create_version(self.local_user, { + "algorithm": "m.megolm_backup.v1", + "auth_data": "first_version_auth_data", + }) + self.assertEqual(version, "1") + + res = None + try: + yield self.handler.get_room_keys(self.local_user, version) + except errors.SynapseError as e: + res = e.code + self.assertEqual(res, 404) + + # TODO: test the locking semantics when uploading room_keys, + # although this is probably best done in sytest @defer.inlineCallbacks - def test_upload_room_keys(self): - yield None + def test_upload_room_keys_no_versions(self): + """Check that we get a 404 on uploading keys when no versions are defined + """ + res = None + try: + yield self.handler.upload_room_keys(self.local_user, "no_version", room_keys) + except errors.SynapseError as e: + res = e.code + self.assertEqual(res, 404) + + @defer.inlineCallbacks + def test_upload_room_keys_bogus_version(self): + """Check that we get a 404 on uploading keys when an nonexistent version is specified + """ + version = yield self.handler.create_version(self.local_user, { + "algorithm": "m.megolm_backup.v1", + "auth_data": "first_version_auth_data", + }) + self.assertEqual(version, "1") + + res = None + try: + yield self.handler.upload_room_keys(self.local_user, "bogus_version", room_keys) + except errors.SynapseError as e: + res = e.code + self.assertEqual(res, 404) + + @defer.inlineCallbacks + def test_upload_room_keys_wrong_version(self): + """Check that we get a 403 on uploading keys for an old version + """ + version = yield self.handler.create_version(self.local_user, { + "algorithm": "m.megolm_backup.v1", + "auth_data": "first_version_auth_data", + }) + self.assertEqual(version, "1") + + version = yield self.handler.create_version(self.local_user, { + "algorithm": "m.megolm_backup.v1", + "auth_data": "second_version_auth_data", + }) + self.assertEqual(version, "2") + + res = None + try: + yield self.handler.upload_room_keys(self.local_user, "1", room_keys) + except errors.SynapseError as e: + res = e.code + self.assertEqual(res, 403) + + @defer.inlineCallbacks + def test_upload_room_keys_insert(self): + """Check that we can insert and retrieve keys for a session + """ + version = yield self.handler.create_version(self.local_user, { + "algorithm": "m.megolm_backup.v1", + "auth_data": "first_version_auth_data", + }) + self.assertEqual(version, "1") + + yield self.handler.upload_room_keys(self.local_user, version, 
room_keys) + + res = yield self.handler.get_room_keys(self.local_user, version) + self.assertDictEqual(res, room_keys) + + # check getting room_keys for a given room + res = yield self.handler.get_room_keys( + self.local_user, + version, + room_id="!abc:matrix.org" + ) + self.assertDictEqual(res, room_keys) + + # check getting room_keys for a given session_id + res = yield self.handler.get_room_keys( + self.local_user, + version, + room_id="!abc:matrix.org", + session_id="c0ff33", + ) + self.assertDictEqual(res, room_keys) + + @defer.inlineCallbacks + def test_upload_room_keys_merge(self): + """Check that we can upload a new room_key for an existing session and + have it correctly merged""" + version = yield self.handler.create_version(self.local_user, { + "algorithm": "m.megolm_backup.v1", + "auth_data": "first_version_auth_data", + }) + self.assertEqual(version, "1") + + yield self.handler.upload_room_keys(self.local_user, version, room_keys) + + new_room_keys = copy.deepcopy(room_keys) + + # test that increasing the message_index doesn't replace the existing session + new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']['first_message_index'] = 2 + new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'] = 'new' + yield self.handler.upload_room_keys(self.local_user, version, new_room_keys) + + res = yield self.handler.get_room_keys(self.local_user, version) + self.assertEqual( + res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'], + "SSBBTSBBIEZJU0gK" + ) + + # test that marking the session as verified however /does/ replace it + new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']['is_verified'] = True + yield self.handler.upload_room_keys(self.local_user, version, new_room_keys) + + res = yield self.handler.get_room_keys(self.local_user, version) + self.assertEqual( + res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'], + "new" + ) + + # test that a session with a higher forwarded_count doesn't replace one + # with a lower forwarding count + new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']['forwarded_count'] = 2 + new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'] = 'other' + yield self.handler.upload_room_keys(self.local_user, version, new_room_keys) + + res = yield self.handler.get_room_keys(self.local_user, version) + self.assertEqual( + res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'], + "new" + ) + + # TODO: check edge cases as well as the common variations here @defer.inlineCallbacks def test_delete_room_keys(self): - yield None + """Check that we can insert and delete keys for a session + """ + version = yield self.handler.create_version(self.local_user, { + "algorithm": "m.megolm_backup.v1", + "auth_data": "first_version_auth_data", + }) + self.assertEqual(version, "1") + + # check for bulk-delete + yield self.handler.upload_room_keys(self.local_user, version, room_keys) + yield self.handler.delete_room_keys(self.local_user, version) + res = None + try: + yield self.handler.get_room_keys( + self.local_user, + version, + room_id="!abc:matrix.org", + session_id="c0ff33", + ) + except errors.SynapseError as e: + res = e.code + self.assertEqual(res, 404) + + # check for bulk-delete per room + yield self.handler.upload_room_keys(self.local_user, version, room_keys) + yield self.handler.delete_room_keys( + self.local_user, + version, + room_id="!abc:matrix.org", + ) + res = None + try: + yield self.handler.get_room_keys( + self.local_user, 
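The merge test above pins down exactly when an uploaded key replaces a stored one. As a standalone sketch of that rule (the function name and exact ordering are inferred from the assertions, not quoted from the handler):

    def _should_replace_room_key(current, new):
        # Replacement rule implied by the merge test: verified keys
        # beat unverified ones; otherwise a key covering earlier
        # messages, or one forwarded fewer times, wins.
        if current is None:
            return True
        if current.get("is_verified") and not new.get("is_verified"):
            return False
        if new.get("is_verified") and not current.get("is_verified"):
            return True
        if new["first_message_index"] < current["first_message_index"]:
            return True
        if new["forwarded_count"] < current["forwarded_count"]:
            return True
        return False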
+ version, + room_id="!abc:matrix.org", + session_id="c0ff33", + ) + except errors.SynapseError as e: + res = e.code + self.assertEqual(res, 404) + + # check for bulk-delete per session + yield self.handler.upload_room_keys(self.local_user, version, room_keys) + yield self.handler.delete_room_keys( + self.local_user, + version, + room_id="!abc:matrix.org", + session_id="c0ff33", + ) + res = None + try: + yield self.handler.get_room_keys( + self.local_user, + version, + room_id="!abc:matrix.org", + session_id="c0ff33", + ) + except errors.SynapseError as e: + res = e.code + self.assertEqual(res, 404) -- cgit 1.4.1 From edc427a35187e462458d188039e761ffdb7e486d Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 31 Dec 2017 17:50:55 +0000 Subject: flake8 --- synapse/storage/e2e_room_keys.py | 2 +- tests/handlers/test_e2e_room_keys.py | 21 +++++++++++++-------- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 8e8e4e457c..c2f226396d 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -136,7 +136,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): desc="get_e2e_room_keys", ) - sessions = { 'rooms': {} } + sessions = {'rooms': {}} for row in rows: room_entry = sessions['rooms'].setdefault(row['room_id'], {"sessions": {}}) room_entry['sessions'][row['session_id']] = { diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index 3cbfd6f9d0..6e43543ed9 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -42,6 +42,7 @@ room_keys = { } } + class E2eRoomKeysHandlerTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): super(E2eRoomKeysHandlerTestCase, self).__init__(*args, **kwargs) @@ -55,7 +56,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): replication_layer=mock.Mock(), ) self.handler = synapse.handlers.e2e_room_keys.E2eRoomKeysHandler(self.hs) - self.local_user = "@boris:" + self.hs.hostname; + self.local_user = "@boris:" + self.hs.hostname @defer.inlineCallbacks def test_get_missing_current_version_info(self): @@ -184,7 +185,8 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): @defer.inlineCallbacks def test_upload_room_keys_bogus_version(self): - """Check that we get a 404 on uploading keys when an nonexistent version is specified + """Check that we get a 404 on uploading keys when an nonexistent version + is specified """ version = yield self.handler.create_version(self.local_user, { "algorithm": "m.megolm_backup.v1", @@ -194,7 +196,9 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): res = None try: - yield self.handler.upload_room_keys(self.local_user, "bogus_version", room_keys) + yield self.handler.upload_room_keys( + self.local_user, "bogus_version", room_keys + ) except errors.SynapseError as e: res = e.code self.assertEqual(res, 404) @@ -267,10 +271,11 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): yield self.handler.upload_room_keys(self.local_user, version, room_keys) new_room_keys = copy.deepcopy(room_keys) + new_room_key = new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33'] # test that increasing the message_index doesn't replace the existing session - new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']['first_message_index'] = 2 - new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'] = 'new' + new_room_key['first_message_index'] = 2 + new_room_key['session_data'] = 'new' yield 
self.handler.upload_room_keys(self.local_user, version, new_room_keys) res = yield self.handler.get_room_keys(self.local_user, version) @@ -280,7 +285,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): ) # test that marking the session as verified however /does/ replace it - new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']['is_verified'] = True + new_room_key['is_verified'] = True yield self.handler.upload_room_keys(self.local_user, version, new_room_keys) res = yield self.handler.get_room_keys(self.local_user, version) @@ -291,8 +296,8 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): # test that a session with a higher forwarded_count doesn't replace one # with a lower forwarding count - new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']['forwarded_count'] = 2 - new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'] = 'other' + new_room_key['forwarded_count'] = 2 + new_room_key['session_data'] = 'other' yield self.handler.upload_room_keys(self.local_user, version, new_room_keys) res = yield self.handler.get_room_keys(self.local_user, version) -- cgit 1.4.1 From 72788cf9c1f018a5346a8f9204c6bfe058289fd2 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 7 Jan 2018 23:45:26 +0000 Subject: support DELETE /version with no args --- synapse/handlers/e2e_room_keys.py | 2 +- synapse/rest/client/v2_alpha/room_keys.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 09c2888db6..a43fc7fc7e 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -267,7 +267,7 @@ class E2eRoomKeysHandler(object): defer.returnValue(results) @defer.inlineCallbacks - def delete_version(self, user_id, version): + def delete_version(self, user_id, version=None): """Deletes a given version of the user's e2e_room_keys backup Args: diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 8f10e4e1cd..63b1f62f90 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -308,7 +308,7 @@ class RoomKeysVersionServlet(RestServlet): It takes out an exclusive lock on this user's room_key backups, to ensure clients only upload to the current backup. - Returns 404 is the given version does not exist. + Returns 404 if the given version does not exist. GET /room_keys/version/12345 HTTP/1.1 { @@ -330,7 +330,8 @@ class RoomKeysVersionServlet(RestServlet): def on_DELETE(self, request, version): """ Delete the information about a given version of the user's - room_keys backup. Doesn't delete the actual room data. + room_keys backup. If the version part is missing, deletes the most + current backup version (if any). Doesn't delete the actual room data. 
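The docstring change above makes the version path component optional on DELETE. For orientation, a sketch of how the servlet plausibly wires through to the handler; this body is assumed from the delete_version(self, user_id, version=None) signature shown earlier, not quoted from the patch:

    @defer.inlineCallbacks
    def on_DELETE(self, request, version):
        # version is None when the URL omits it; the handler then
        # deletes the current backup version (or 404s if none exists).
        requester = yield self.auth.get_user_by_req(request, allow_guest=False)
        user_id = requester.user.to_string()
        yield self.e2e_room_keys_handler.delete_version(user_id, version)
        defer.returnValue((200, {}))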
DELETE /room_keys/version/12345 HTTP/1.1 HTTP/1.1 200 OK -- cgit 1.4.1 From 66a4ca1d28c2c2e22a0343e6db0f5a2bce9ec987 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 7 Jan 2018 23:45:55 +0000 Subject: 404 nicely if you try to interact with a missing current version --- synapse/storage/e2e_room_keys.py | 51 +++++++++++++++++++++++++----------- tests/handlers/test_e2e_room_keys.py | 22 ++++++++++++++++ 2 files changed, 57 insertions(+), 16 deletions(-) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index c2f226396d..d82f223e86 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -184,6 +184,17 @@ class EndToEndRoomKeyStore(SQLBaseStore): desc="delete_e2e_room_keys", ) + @staticmethod + def _get_current_version(txn, user_id): + txn.execute( + "SELECT MAX(version) FROM e2e_room_keys_versions WHERE user_id=?", + (user_id,) + ) + row = txn.fetchone() + if not row: + raise StoreError(404, 'No current backup version') + return row[0] + def get_e2e_room_keys_version_info(self, user_id, version=None): """Get info metadata about a version of our room_keys backup. @@ -199,11 +210,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): def _get_e2e_room_keys_version_info_txn(txn): if version is None: - txn.execute( - "SELECT MAX(version) FROM e2e_room_keys_versions WHERE user_id=?", - (user_id,) - ) - this_version = txn.fetchone()[0] + this_version = self._get_current_version(txn, user_id) else: this_version = version @@ -266,23 +273,35 @@ class EndToEndRoomKeyStore(SQLBaseStore): "create_e2e_room_keys_version_txn", _create_e2e_room_keys_version_txn ) - @defer.inlineCallbacks - def delete_e2e_room_keys_version(self, user_id, version): + def delete_e2e_room_keys_version(self, user_id, version=None): """Delete a given backup version of the user's room keys. Doesn't delete their actual key data. Args: user_id(str): the user whose backup version we're deleting - version(str): the ID of the backup version we're deleting + version(str): Optional. the version ID of the backup version we're deleting + If missing, we delete the current backup version info. + Raises: + StoreError: with code 404 if there are no e2e_room_keys_versions present, + or if the version requested doesn't exist. 
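Both the version getter above and the delete below share one idiom: when no version is supplied, resolve it to the latest one inside the transaction, so the lookup and the following read or delete see a consistent view. As an illustrative helper (the name is invented; the patch inlines this logic):

    def _resolve_version_txn(self, txn, user_id, version):
        # None means "the current backup"; _get_current_version raises
        # StoreError(404) when the user has no backup versions at all.
        if version is None:
            return self._get_current_version(txn, user_id)
        return version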
""" - keyvalues = { - "user_id": user_id, - "version": version, - } + def _delete_e2e_room_keys_version_txn(txn): + if version is None: + this_version = self._get_current_version(txn, user_id) + else: + this_version = version - yield self._simple_delete( - table="e2e_room_keys_versions", - keyvalues=keyvalues, - desc="delete_e2e_room_keys_version", + return self._simple_delete_one_txn( + txn, + table="e2e_room_keys_versions", + keyvalues={ + "user_id": user_id, + "version": this_version, + }, + ) + + return self.runInteraction( + "delete_e2e_room_keys_version", + _delete_e2e_room_keys_version_txn ) diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index 6e43543ed9..8bfffb5c0e 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -123,6 +123,28 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): "auth_data": "second_version_auth_data", }) + @defer.inlineCallbacks + def test_delete_missing_version(self): + """Check that we get a 404 on deleting nonexistent versions + """ + res = None + try: + yield self.handler.delete_version(self.local_user, "1") + except errors.SynapseError as e: + res = e.code + self.assertEqual(res, 404) + + @defer.inlineCallbacks + def test_delete_missing_current_version(self): + """Check that we get a 404 on deleting nonexistent current version + """ + res = None + try: + yield self.handler.delete_version(self.local_user) + except errors.SynapseError as e: + res = e.code + self.assertEqual(res, 404) + @defer.inlineCallbacks def test_delete_version(self): """Check that we can create and then delete versions. -- cgit 1.4.1 From 54ac18e8327e689b9f91b93ad8caac2f6b3dd29c Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 7 Jan 2018 23:48:22 +0000 Subject: use parse_string --- synapse/rest/client/v2_alpha/room_keys.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 63b1f62f90..afb8a36dd0 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -123,7 +123,7 @@ class RoomKeysServlet(RestServlet): requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() body = parse_json_object_from_request(request) - version = request.args.get("version")[0] + version = parse_string(request, "version") if session_id: body = { @@ -199,7 +199,7 @@ class RoomKeysServlet(RestServlet): """ requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() - version = request.args.get("version")[0] + version = parse_string(request, "version") room_keys = yield self.e2e_room_keys_handler.get_room_keys( user_id, version, room_id, session_id @@ -229,7 +229,7 @@ class RoomKeysServlet(RestServlet): requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() - version = request.args.get("version")[0] + version = parse_string(request, "version") yield self.e2e_room_keys_handler.delete_room_keys( user_id, version, room_id, session_id -- cgit 1.4.1 From f0cede5556a54ece6cd5289856bb230f8009c4fa Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 7 Jan 2018 23:58:32 +0000 Subject: missing import --- synapse/storage/e2e_room_keys.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index d82f223e86..089989fcfa 100644 --- 
a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -14,6 +14,7 @@ # limitations under the License. from twisted.internet import defer +from synapse.api.errors import StoreError from ._base import SQLBaseStore -- cgit 1.4.1 From 4f7064f6b5a1822067bfbf358317bc1dbb51b9c6 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 7 Jan 2018 23:59:07 +0000 Subject: missing import --- synapse/rest/client/v2_alpha/room_keys.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index afb8a36dd0..9f0172e6f5 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -19,7 +19,7 @@ from twisted.internet import defer from synapse.api.errors import SynapseError from synapse.http.servlet import ( - RestServlet, parse_json_object_from_request + RestServlet, parse_json_object_from_request, parse_string ) from ._base import client_v2_patterns -- cgit 1.4.1 From 8550a7e9c2fcf9e46c717127d14c79010350a998 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Tue, 21 Aug 2018 10:38:00 -0400 Subject: allow auth_data to be any JSON instead of a string --- synapse/storage/e2e_room_keys.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 089989fcfa..c7b1fad21e 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -15,6 +15,7 @@ from twisted.internet import defer from synapse.api.errors import StoreError +import simplejson as json from ._base import SQLBaseStore @@ -215,7 +216,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): else: this_version = version - return self._simple_select_one_txn( + result = self._simple_select_one_txn( txn, table="e2e_room_keys_versions", keyvalues={ @@ -228,6 +229,8 @@ class EndToEndRoomKeyStore(SQLBaseStore): "auth_data", ), ) + result["auth_data"] = json.loads(result["auth_data"]) + return result return self.runInteraction( "get_e2e_room_keys_version_info", @@ -264,7 +267,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): "user_id": user_id, "version": new_version, "algorithm": info["algorithm"], - "auth_data": info["auth_data"], + "auth_data": json.dumps(info["auth_data"]), }, ) -- cgit 1.4.1 From 42a394caa2884096e5afe1f1c2c75680792769d8 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Tue, 21 Aug 2018 14:51:34 -0400 Subject: allow session_data to be any JSON instead of just a string --- synapse/storage/e2e_room_keys.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index c7b1fad21e..b695570a7b 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -58,6 +58,8 @@ class EndToEndRoomKeyStore(SQLBaseStore): desc="get_e2e_room_key", ) + row["session_data"] = json.loads(row["session_data"]); + defer.returnValue(row) @defer.inlineCallbacks @@ -87,7 +89,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): "first_message_index": room_key['first_message_index'], "forwarded_count": room_key['forwarded_count'], "is_verified": room_key['is_verified'], - "session_data": room_key['session_data'], + "session_data": json.dumps(room_key['session_data']), }, lock=False, ) @@ -145,7 +147,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): "first_message_index": row["first_message_index"], "forwarded_count": row["forwarded_count"], "is_verified": row["is_verified"], - "session_data": 
row["session_data"], + "session_data": json.loads(row["session_data"]), } defer.returnValue(sessions) -- cgit 1.4.1 From 16a31c6fcebe31bbdb655ebcb24ccce426b3e994 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Fri, 24 Aug 2018 22:51:25 -0400 Subject: update to newer Synapse APIs --- synapse/handlers/e2e_room_keys.py | 2 +- tests/handlers/test_e2e_room_keys.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index a43fc7fc7e..c09816b372 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -18,7 +18,7 @@ import logging from twisted.internet import defer from synapse.api.errors import StoreError, SynapseError, RoomKeysVersionError -from synapse.util.async import Linearizer +from synapse.util.async_helpers import Linearizer logger = logging.getLogger(__name__) diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index 8bfffb5c0e..7fa4264441 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -52,6 +52,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): @defer.inlineCallbacks def setUp(self): self.hs = yield utils.setup_test_homeserver( + self.addCleanup, handlers=None, replication_layer=mock.Mock(), ) -- cgit 1.4.1 From 5f02017aead05adf127b89d7e203849982bd7fdc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 Sep 2018 10:20:40 +0100 Subject: Improve performance of getting typing updates for replication Fetching the list of all new typing notifications involved iterating over all rooms and comparing their serial. Lets move to using a stream change cache, like we do for other streams. --- synapse/handlers/typing.py | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 2d2d3d5a0d..65f475d639 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -20,6 +20,7 @@ from twisted.internet import defer from synapse.api.errors import AuthError, SynapseError from synapse.types import UserID, get_domain_from_id +from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.logcontext import run_in_background from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer @@ -68,6 +69,11 @@ class TypingHandler(object): # map room IDs to sets of users currently typing self._room_typing = {} + # caches which room_ids changed at which serials + self._typing_stream_change_cache = StreamChangeCache( + "TypingStreamChangeCache", self._latest_room_serial, + ) + self.clock.looping_call( self._handle_timeouts, 5000, @@ -274,19 +280,29 @@ class TypingHandler(object): self._latest_room_serial += 1 self._room_serials[member.room_id] = self._latest_room_serial + self._typing_stream_change_cache.entity_has_changed( + member.room_id, self._latest_room_serial, + ) self.notifier.on_new_event( "typing_key", self._latest_room_serial, rooms=[member.room_id] ) def get_all_typing_updates(self, last_id, current_id): - # TODO: Work out a way to do this without scanning the entire state. 
if last_id == current_id: return [] + changed_rooms = self._typing_stream_change_cache.get_all_entities_changed( + last_id, + ) + + if changed_rooms is None: + changed_rooms = self._room_serials + rows = [] - for room_id, serial in self._room_serials.items(): - if last_id < serial and serial <= current_id: + for room_id in changed_rooms: + serial = self._room_serials[room_id] + if last_id < serial <= current_id: typing = self._room_typing[room_id] rows.append((serial, room_id, list(typing))) rows.sort() -- cgit 1.4.1 From 3801b8aa035594972c400c8bd036894a388c4ab3 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Thu, 6 Sep 2018 11:23:16 -0400 Subject: try to make flake8 and isort happy --- synapse/api/errors.py | 1 + synapse/handlers/e2e_room_keys.py | 2 +- synapse/rest/client/v2_alpha/room_keys.py | 5 ++++- synapse/storage/e2e_room_keys.py | 6 ++++-- tests/handlers/test_e2e_room_keys.py | 9 +++++---- 5 files changed, 15 insertions(+), 8 deletions(-) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 3002c95dd1..140dbfe8b8 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -326,6 +326,7 @@ class RoomKeysVersionError(SynapseError): ) self.current_version = current_version + class IncompatibleRoomVersionError(SynapseError): """A server is trying to join a room whose version it does not support.""" diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index c09816b372..2c330382cf 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -17,7 +17,7 @@ import logging from twisted.internet import defer -from synapse.api.errors import StoreError, SynapseError, RoomKeysVersionError +from synapse.api.errors import RoomKeysVersionError, StoreError, SynapseError from synapse.util.async_helpers import Linearizer logger = logging.getLogger(__name__) diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 9f0172e6f5..1ed18e986f 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -19,8 +19,11 @@ from twisted.internet import defer from synapse.api.errors import SynapseError from synapse.http.servlet import ( - RestServlet, parse_json_object_from_request, parse_string + RestServlet, + parse_json_object_from_request, + parse_string, ) + from ._base import client_v2_patterns logger = logging.getLogger(__name__) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index b695570a7b..969f4aef9c 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -13,9 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +import simplejson as json + from twisted.internet import defer + from synapse.api.errors import StoreError -import simplejson as json from ._base import SQLBaseStore @@ -58,7 +60,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): desc="get_e2e_room_key", ) - row["session_data"] = json.loads(row["session_data"]); + row["session_data"] = json.loads(row["session_data"]) defer.returnValue(row) diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index 7fa4264441..9e08eac0a5 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -14,17 +14,18 @@ # See the License for the specific language governing permissions and # limitations under the License. 
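Stepping back to the auth_data/session_data changes a few patches above: both columns stay TEXT, so arbitrary JSON now round-trips through json.dumps and json.loads at the storage boundary. A self-contained sketch (the payload values are invented):

    import simplejson as json

    auth_data = {"public_key": "abcdefg", "signatures": {}}

    stored = json.dumps(auth_data)  # what lands in the TEXT column
    assert json.loads(stored) == auth_data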
+import copy + import mock -from synapse.api import errors + from twisted.internet import defer -import copy import synapse.api.errors import synapse.handlers.e2e_room_keys - import synapse.storage -from tests import unittest, utils +from synapse.api import errors +from tests import unittest, utils # sample room_key data for use in the tests room_keys = { -- cgit 1.4.1 From a40802bcbc2838033ecfe468d7845adbb1f3cbb7 Mon Sep 17 00:00:00 2001 From: Luca Weiss Date: Fri, 31 Aug 2018 14:40:17 +0200 Subject: Fix build of Docker image with docker-compose ... and fix a typo --- contrib/docker/docker-compose.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml index 3a8dfbae34..61ec700afd 100644 --- a/contrib/docker/docker-compose.yml +++ b/contrib/docker/docker-compose.yml @@ -6,9 +6,11 @@ version: '3' services: synapse: - build: ../.. + build: + context: ../.. + dockerfile: docker/Dockerfile image: docker.io/matrixdotorg/synapse:latest - # Since snyapse does not retry to connect to the database, restart upon + # Since synapse does not retry to connect to the database, restart upon # failure restart: unless-stopped # See the readme for a full documentation of the environment settings -- cgit 1.4.1 From f8825748dd76f3d806b0db14b8e61d267f2ea121 Mon Sep 17 00:00:00 2001 From: Luca Weiss Date: Tue, 11 Sep 2018 12:56:31 +0200 Subject: changelog.d entry somehow got lost --- changelog.d/3778.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3778.misc diff --git a/changelog.d/3778.misc b/changelog.d/3778.misc new file mode 100644 index 0000000000..b78a2c9f42 --- /dev/null +++ b/changelog.d/3778.misc @@ -0,0 +1 @@ +Fix build of Docker image with docker-compose -- cgit 1.4.1 From bc74925c5b94f0c02603297d4ccb7e05008d5124 Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 13 Sep 2018 17:02:59 +0100 Subject: WIP e2e key backups Continues from uhoreg's branch This just fixed the errcode on /room_keys/version if no backup and updates the schema delta to be on the latest so it gets run --- synapse/rest/client/v2_alpha/room_keys.py | 14 ++++++--- synapse/storage/schema/delta/46/e2e_room_keys.sql | 38 ----------------------- synapse/storage/schema/delta/51/e2e_room_keys.sql | 38 +++++++++++++++++++++++ 3 files changed, 48 insertions(+), 42 deletions(-) delete mode 100644 synapse/storage/schema/delta/46/e2e_room_keys.sql create mode 100644 synapse/storage/schema/delta/51/e2e_room_keys.sql diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 1ed18e986f..ea114bc8b4 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -17,7 +17,7 @@ import logging from twisted.internet import defer -from synapse.api.errors import SynapseError +from synapse.api.errors import SynapseError, Codes from synapse.http.servlet import ( RestServlet, parse_json_object_from_request, @@ -324,9 +324,15 @@ class RoomKeysVersionServlet(RestServlet): requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() - info = yield self.e2e_room_keys_handler.get_version_info( - user_id, version - ) + try: + info = yield self.e2e_room_keys_handler.get_version_info( + user_id, version + ) + except SynapseError as e: + if e.code == 404: + e.errcode = Codes.NOT_FOUND + e.msg = "No backup found" + raise e defer.returnValue((200, info)) @defer.inlineCallbacks diff --git 
a/synapse/storage/schema/delta/46/e2e_room_keys.sql b/synapse/storage/schema/delta/46/e2e_room_keys.sql deleted file mode 100644 index 4531fd56ee..0000000000 --- a/synapse/storage/schema/delta/46/e2e_room_keys.sql +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright 2017 New Vector Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - --- users' optionally backed up encrypted e2e sessions -CREATE TABLE e2e_room_keys ( - user_id TEXT NOT NULL, - room_id TEXT NOT NULL, - session_id TEXT NOT NULL, - version TEXT NOT NULL, - first_message_index INT, - forwarded_count INT, - is_verified BOOLEAN, - session_data TEXT NOT NULL -); - -CREATE UNIQUE INDEX e2e_room_keys_idx ON e2e_room_keys(user_id, room_id, session_id); - --- the metadata for each generation of encrypted e2e session backups -CREATE TABLE e2e_room_keys_versions ( - user_id TEXT NOT NULL, - version TEXT NOT NULL, - algorithm TEXT NOT NULL, - auth_data TEXT NOT NULL -); - -CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions(user_id, version); diff --git a/synapse/storage/schema/delta/51/e2e_room_keys.sql b/synapse/storage/schema/delta/51/e2e_room_keys.sql new file mode 100644 index 0000000000..4531fd56ee --- /dev/null +++ b/synapse/storage/schema/delta/51/e2e_room_keys.sql @@ -0,0 +1,38 @@ +/* Copyright 2017 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
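Moving the delta from 46 to 51 recreates the same two tables under the current schema version so that deployments already past 46 actually run it. As a quick standalone illustration of what the unique index enforces (sqlite purely for demonstration):

    import sqlite3

    db = sqlite3.connect(":memory:")
    db.execute(
        "CREATE TABLE e2e_room_keys ("
        " user_id TEXT NOT NULL, room_id TEXT NOT NULL,"
        " session_id TEXT NOT NULL, version TEXT NOT NULL,"
        " first_message_index INT, forwarded_count INT,"
        " is_verified BOOLEAN, session_data TEXT NOT NULL)"
    )
    db.execute(
        "CREATE UNIQUE INDEX e2e_room_keys_idx"
        " ON e2e_room_keys(user_id, room_id, session_id)"
    )
    # one row per (user, room, session): re-inserting the same session
    # violates the index, which is why uploads go through an upsert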
+ */ + +-- users' optionally backed up encrypted e2e sessions +CREATE TABLE e2e_room_keys ( + user_id TEXT NOT NULL, + room_id TEXT NOT NULL, + session_id TEXT NOT NULL, + version TEXT NOT NULL, + first_message_index INT, + forwarded_count INT, + is_verified BOOLEAN, + session_data TEXT NOT NULL +); + +CREATE UNIQUE INDEX e2e_room_keys_idx ON e2e_room_keys(user_id, room_id, session_id); + +-- the metadata for each generation of encrypted e2e session backups +CREATE TABLE e2e_room_keys_versions ( + user_id TEXT NOT NULL, + version TEXT NOT NULL, + algorithm TEXT NOT NULL, + auth_data TEXT NOT NULL +); + +CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions(user_id, version); -- cgit 1.4.1 From 7de1989ea2809c13b67be6c4990b269d817fae44 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 13 Sep 2018 22:43:50 +0100 Subject: fix link for case that config.email_riot_base_url is set --- synapse/push/mailer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index b78ce10396..1a5a10d974 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -441,7 +441,7 @@ class Mailer(object): def make_room_link(self, room_id): if self.hs.config.email_riot_base_url: - base_url = self.hs.config.email_riot_base_url + base_url = "%s/#/room" % (self.hs.config.email_riot_base_url) elif self.app_name == "Vector": # need /beta for Universal Links to work on iOS base_url = "https://vector.im/beta/#/room" -- cgit 1.4.1 From bef1d6f3bfdda4b07b7c72d284a6c79aed9bc93c Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 13 Sep 2018 22:53:35 +0100 Subject: towncrier --- changelog.d/3868.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3868.bugfix diff --git a/changelog.d/3868.bugfix b/changelog.d/3868.bugfix new file mode 100644 index 0000000000..99da8389a5 --- /dev/null +++ b/changelog.d/3868.bugfix @@ -0,0 +1 @@ +Fix broken invite email links for self hosted riots -- cgit 1.4.1 From d42d79e3c3cc229fa6bb268a97edde4ac81e2df5 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 15 Sep 2018 22:27:41 +0100 Subject: don't ratelimit autojoins --- synapse/handlers/register.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 1e53f2c635..da914c46ff 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -534,4 +534,5 @@ class RegistrationHandler(BaseHandler): room_id=room_id, remote_room_hosts=remote_room_hosts, action="join", + ratelimit=False, ) -- cgit 1.4.1 From c71b93f2a4bec7d12ce905388336e897a5227b35 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 15 Sep 2018 22:28:28 +0100 Subject: changelog --- changelog.d/3879.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3879.bugfix diff --git a/changelog.d/3879.bugfix b/changelog.d/3879.bugfix new file mode 100644 index 0000000000..82cb2a8ebc --- /dev/null +++ b/changelog.d/3879.bugfix @@ -0,0 +1 @@ +Don't ratelimit autojoins -- cgit 1.4.1 From 9c749a6b612d538a0d8374152ff358508f733734 Mon Sep 17 00:00:00 2001 From: Simon Dwyer Date: Sun, 16 Sep 2018 10:22:27 +1000 Subject: Added 'MAX_UPLOAD_SIZE' variable and set default to "10M" --- docker/conf/homeserver.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml index 6bc25bb45f..cfe88788f2 100644 --- a/docker/conf/homeserver.yaml +++ b/docker/conf/homeserver.yaml @@ -85,7 +85,7 @@ federation_rc_concurrent: 3 
media_store_path: "/data/media" uploads_path: "/data/uploads" -max_upload_size: "10M" +max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "10M" }}" max_image_pixels: "32M" dynamic_thumbnails: false -- cgit 1.4.1 From f472abd792c5f3d969bb16c9ad7ee278b87ee761 Mon Sep 17 00:00:00 2001 From: Simon Dwyer Date: Sun, 16 Sep 2018 10:28:29 +1000 Subject: Added description for "SYNAPSE_MAX_UPLOAD_SIZE" variable. --- docker/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/README.md b/docker/README.md index 038c78f7c0..ecdf8a8b17 100644 --- a/docker/README.md +++ b/docker/README.md @@ -88,6 +88,7 @@ variables are available for configuration: * ``SYNAPSE_TURN_URIS``, set this variable to the coma-separated list of TURN uris to enable TURN for this homeserver. * ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required. +* ``SYNAPSE_MAX_UPLOAD_SIZE``, the max upload size [default `10M`]. Shared secrets, that will be initialized to random values if not set: -- cgit 1.4.1 From da864a92c9eb8335fdd5602bcd4afe21c98cca39 Mon Sep 17 00:00:00 2001 From: Simon Dwyer Date: Sun, 16 Sep 2018 12:59:23 +1000 Subject: Added description for "SYNAPSE_MAX_UPLOAD_SIZE" variable. --- docker/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/README.md b/docker/README.md index ecdf8a8b17..3c00d1e948 100644 --- a/docker/README.md +++ b/docker/README.md @@ -88,7 +88,7 @@ variables are available for configuration: * ``SYNAPSE_TURN_URIS``, set this variable to the coma-separated list of TURN uris to enable TURN for this homeserver. * ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required. -* ``SYNAPSE_MAX_UPLOAD_SIZE``, the max upload size [default `10M`]. +* ``SYNAPSE_MAX_UPLOAD_SIZE``, set this variable to change the max upload size [default `10M`]. Shared secrets, that will be initialized to random values if not set: -- cgit 1.4.1 From 0e46ff690473538477037d5dbd2200f6863fc68e Mon Sep 17 00:00:00 2001 From: Simon Dwyer Date: Sun, 16 Sep 2018 13:33:33 +1000 Subject: Adding the ability to change MAX_UPLOAD_SIZE for the docker container variables. Signed-off-by: Simon Dwyer --- changelog.d/3883.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3883.feature diff --git a/changelog.d/3883.feature b/changelog.d/3883.feature new file mode 100644 index 0000000000..c11e5c5309 --- /dev/null +++ b/changelog.d/3883.feature @@ -0,0 +1 @@ +Adding the ability to change MAX_UPLOAD_SIZE for the docker container variables. 
\ No newline at end of file -- cgit 1.4.1 From 85a43f4167b5e775f7c3afe96d71137818561272 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 17 Sep 2018 13:19:00 +0100 Subject: Return a 404 when deleting unknown room alias As per https://github.com/matrix-org/matrix-doc/issues/1675 Fixes https://github.com/matrix-org/synapse/issues/2782 --- synapse/handlers/directory.py | 19 ++++++++++++++++--- synapse/storage/directory.py | 1 - 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index ef866da1b6..c745e6740b 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -20,7 +20,14 @@ import string from twisted.internet import defer from synapse.api.constants import EventTypes -from synapse.api.errors import AuthError, CodeMessageException, Codes, SynapseError +from synapse.api.errors import ( + AuthError, + CodeMessageException, + Codes, + NotFoundError, + StoreError, + SynapseError, +) from synapse.types import RoomAlias, UserID, get_domain_from_id from ._base import BaseHandler @@ -109,7 +116,13 @@ class DirectoryHandler(BaseHandler): def delete_association(self, requester, user_id, room_alias): # association deletion for human users - can_delete = yield self._user_can_delete_alias(room_alias, user_id) + try: + can_delete = yield self._user_can_delete_alias(room_alias, user_id) + except StoreError as e: + if e.code == 404: + raise NotFoundError("Unknown room alias") + raise + if not can_delete: raise AuthError( 403, "You don't have permission to delete the alias.", @@ -320,7 +333,7 @@ class DirectoryHandler(BaseHandler): def _user_can_delete_alias(self, alias, user_id): creator = yield self.store.get_room_alias_creator(alias.to_string()) - if creator and creator == user_id: + if creator == user_id: defer.returnValue(True) is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id)) diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py index 808194236a..cfb687cb53 100644 --- a/synapse/storage/directory.py +++ b/synapse/storage/directory.py @@ -75,7 +75,6 @@ class DirectoryWorkerStore(SQLBaseStore): }, retcol="creator", desc="get_room_alias_creator", - allow_none=True ) @cached(max_entries=5000) -- cgit 1.4.1 From c1ae6b1bce61fecda91aa6ab8a51d152418cf79e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 17 Sep 2018 13:21:08 +0100 Subject: changelog --- changelog.d/3889.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3889.bugfix diff --git a/changelog.d/3889.bugfix b/changelog.d/3889.bugfix new file mode 100644 index 0000000000..e976425987 --- /dev/null +++ b/changelog.d/3889.bugfix @@ -0,0 +1 @@ +Fix 500 error when deleting unknown room alias -- cgit 1.4.1 From fe88907d044c79844612b3950421b48c20fd22ad Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 17 Sep 2018 22:33:22 +1000 Subject: version --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index 9dbe0b9f10..f6a194dfc2 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.33.4" +__version__ = "0.33.5rc1" -- cgit 1.4.1 From b56ef14629fe4ee07f3a166073a55509fc69282d Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 17 Sep 2018 22:36:41 +1000 Subject: update python 3 changelog --- changelog.d/3576.feature | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/3576.feature 
b/changelog.d/3576.feature index 02a10e370d..a624780bab 100644 --- a/changelog.d/3576.feature +++ b/changelog.d/3576.feature @@ -1 +1 @@ -Python 3.5+ is now supported. +Python 3.5 and 3.6 support is now in beta. -- cgit 1.4.1 From 1e70f1dbab352fc559e3ae815f7855503cca6d41 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 17 Sep 2018 22:36:56 +1000 Subject: changelog --- CHANGES.md | 58 ++++++++++++++++++++++++++++++++++++++++++++++++ changelog.d/3576.feature | 1 - changelog.d/3704.misc | 1 - changelog.d/3771.misc | 1 - changelog.d/3788.bugfix | 1 - changelog.d/3789.misc | 1 - changelog.d/3790.feature | 1 - changelog.d/3795.misc | 1 - changelog.d/3800.bugfix | 1 - changelog.d/3803.misc | 1 - changelog.d/3804.bugfix | 1 - changelog.d/3805.misc | 1 - changelog.d/3806.misc | 1 - changelog.d/3808.misc | 1 - changelog.d/3810.bugfix | 1 - changelog.d/3822.misc | 1 - changelog.d/3823.misc | 1 - changelog.d/3824.bugfix | 1 - changelog.d/3826.misc | 1 - changelog.d/3827.misc | 1 - changelog.d/3834.misc | 1 - changelog.d/3835.bugfix | 1 - changelog.d/3840.misc | 1 - changelog.d/3841.bugfix | 1 - changelog.d/3845.bugfix | 1 - changelog.d/3846.feature | 1 - changelog.d/3847.misc | 1 - changelog.d/3851.bugfix | 1 - changelog.d/3853.misc | 1 - changelog.d/3855.misc | 1 - changelog.d/3856.misc | 1 - changelog.d/3857.misc | 1 - changelog.d/3858.misc | 1 - changelog.d/3859.bugfix | 1 - changelog.d/3860.misc | 1 - changelog.d/3871.misc | 1 - changelog.d/3872.misc | 1 - changelog.d/3874.bugfix | 0 changelog.d/3875.bugfix | 1 - changelog.d/3877.misc | 1 - changelog.d/3888.misc | 1 - 41 files changed, 58 insertions(+), 39 deletions(-) delete mode 100644 changelog.d/3576.feature delete mode 100644 changelog.d/3704.misc delete mode 100644 changelog.d/3771.misc delete mode 100644 changelog.d/3788.bugfix delete mode 100644 changelog.d/3789.misc delete mode 100644 changelog.d/3790.feature delete mode 100644 changelog.d/3795.misc delete mode 100644 changelog.d/3800.bugfix delete mode 100644 changelog.d/3803.misc delete mode 100644 changelog.d/3804.bugfix delete mode 100644 changelog.d/3805.misc delete mode 100644 changelog.d/3806.misc delete mode 100644 changelog.d/3808.misc delete mode 100644 changelog.d/3810.bugfix delete mode 100644 changelog.d/3822.misc delete mode 100644 changelog.d/3823.misc delete mode 100644 changelog.d/3824.bugfix delete mode 100644 changelog.d/3826.misc delete mode 100644 changelog.d/3827.misc delete mode 100644 changelog.d/3834.misc delete mode 100644 changelog.d/3835.bugfix delete mode 100644 changelog.d/3840.misc delete mode 100644 changelog.d/3841.bugfix delete mode 100644 changelog.d/3845.bugfix delete mode 100644 changelog.d/3846.feature delete mode 100644 changelog.d/3847.misc delete mode 100644 changelog.d/3851.bugfix delete mode 100644 changelog.d/3853.misc delete mode 100644 changelog.d/3855.misc delete mode 100644 changelog.d/3856.misc delete mode 100644 changelog.d/3857.misc delete mode 100644 changelog.d/3858.misc delete mode 100644 changelog.d/3859.bugfix delete mode 100644 changelog.d/3860.misc delete mode 100644 changelog.d/3871.misc delete mode 100644 changelog.d/3872.misc delete mode 100644 changelog.d/3874.bugfix delete mode 100644 changelog.d/3875.bugfix delete mode 100644 changelog.d/3877.misc delete mode 100644 changelog.d/3888.misc diff --git a/CHANGES.md b/CHANGES.md index ee864c3c63..00dbed7021 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,61 @@ +Synapse 0.33.5rc1 (2018-09-17) +============================== + +Features +-------- + +- Python 3.5 and 3.6 
support is now in beta. ([\#3576](https://github.com/matrix-org/synapse/issues/3576)) +- Implement `event_format` filter param in `/sync` ([\#3790](https://github.com/matrix-org/synapse/issues/3790)) +- Add synapse_admin_mau:registered_reserved_users metric to expose number of real reaserved users ([\#3846](https://github.com/matrix-org/synapse/issues/3846)) + + +Bugfixes +-------- + +- Remove connection ID for replication prometheus metrics, as it creates a large number of new series. ([\#3788](https://github.com/matrix-org/synapse/issues/3788)) +- guest users should not be part of mau total ([\#3800](https://github.com/matrix-org/synapse/issues/3800)) +- Bump dependency on pyopenssl 16.x, to avoid incompatibility with recent Twisted. ([\#3804](https://github.com/matrix-org/synapse/issues/3804)) +- Fix existing room tags not coming down sync when joining a room ([\#3810](https://github.com/matrix-org/synapse/issues/3810)) +- Fix jwt import check ([\#3824](https://github.com/matrix-org/synapse/issues/3824)) +- fix VOIP crashes under Python 3 (#3821) ([\#3835](https://github.com/matrix-org/synapse/issues/3835)) +- Fix manhole so that it works with latest openssh clients ([\#3841](https://github.com/matrix-org/synapse/issues/3841)) +- Fix outbound requests occasionally wedging, which can result in federation breaking between servers. ([\#3845](https://github.com/matrix-org/synapse/issues/3845)) +- Show heroes if room name/canonical alias has been deleted ([\#3851](https://github.com/matrix-org/synapse/issues/3851)) +- Fix handling of redacted events from federation ([\#3859](https://github.com/matrix-org/synapse/issues/3859)) +- ([\#3874](https://github.com/matrix-org/synapse/issues/3874)) +- Mitigate outbound federation randomly becoming wedged ([\#3875](https://github.com/matrix-org/synapse/issues/3875)) + + +Internal Changes +---------------- + +- CircleCI tests now run on the potential merge of a PR. ([\#3704](https://github.com/matrix-org/synapse/issues/3704)) +- http/ is now ported to Python 3. ([\#3771](https://github.com/matrix-org/synapse/issues/3771)) +- Improve human readable error messages for threepid registration/account update ([\#3789](https://github.com/matrix-org/synapse/issues/3789)) +- Make /sync slightly faster by avoiding needless copies ([\#3795](https://github.com/matrix-org/synapse/issues/3795)) +- handlers/ is now ported to Python 3. ([\#3803](https://github.com/matrix-org/synapse/issues/3803)) +- Limit the number of PDUs/EDUs per federation transaction ([\#3805](https://github.com/matrix-org/synapse/issues/3805)) +- Only start postgres instance for postgres tests on Travis CI ([\#3806](https://github.com/matrix-org/synapse/issues/3806)) +- tests/ is now ported to Python 3. ([\#3808](https://github.com/matrix-org/synapse/issues/3808)) +- crypto/ is now ported to Python 3. ([\#3822](https://github.com/matrix-org/synapse/issues/3822)) +- rest/ is now ported to Python 3. ([\#3823](https://github.com/matrix-org/synapse/issues/3823)) +- add some logging for the keyring queue ([\#3826](https://github.com/matrix-org/synapse/issues/3826)) +- speed up lazy loading by 2-3x ([\#3827](https://github.com/matrix-org/synapse/issues/3827)) +- Improved Dockerfile to remove build requirements after building reducing the image size. ([\#3834](https://github.com/matrix-org/synapse/issues/3834)) +- Disable lazy loading for incremental syncs for now ([\#3840](https://github.com/matrix-org/synapse/issues/3840)) +- federation/ is now ported to Python 3. 
([\#3847](https://github.com/matrix-org/synapse/issues/3847)) +- Log when we retry outbound requests ([\#3853](https://github.com/matrix-org/synapse/issues/3853)) +- Removed some excess logging messages. ([\#3855](https://github.com/matrix-org/synapse/issues/3855)) +- Speed up purge history for rooms that have been previously purged ([\#3856](https://github.com/matrix-org/synapse/issues/3856)) +- Refactor some HTTP timeout code. ([\#3857](https://github.com/matrix-org/synapse/issues/3857)) +- Fix running merged builds on CircleCI ([\#3858](https://github.com/matrix-org/synapse/issues/3858)) +- Fix typo in replication stream exception. ([\#3860](https://github.com/matrix-org/synapse/issues/3860)) +- Add in flight real time metrics for Measure blocks ([\#3871](https://github.com/matrix-org/synapse/issues/3871)) +- Disable buffering and automatic retrying in treq requests to prevent timeouts. ([\#3872](https://github.com/matrix-org/synapse/issues/3872)) +- mention jemalloc in the README ([\#3877](https://github.com/matrix-org/synapse/issues/3877)) +- Remove unmaintained "nuke-room-from-db.sh" script ([\#3888](https://github.com/matrix-org/synapse/issues/3888)) + + Synapse 0.33.4 (2018-09-07) =========================== diff --git a/changelog.d/3576.feature b/changelog.d/3576.feature deleted file mode 100644 index a624780bab..0000000000 --- a/changelog.d/3576.feature +++ /dev/null @@ -1 +0,0 @@ -Python 3.5 and 3.6 support is now in beta. diff --git a/changelog.d/3704.misc b/changelog.d/3704.misc deleted file mode 100644 index aaae0fbd63..0000000000 --- a/changelog.d/3704.misc +++ /dev/null @@ -1 +0,0 @@ -CircleCI tests now run on the potential merge of a PR. diff --git a/changelog.d/3771.misc b/changelog.d/3771.misc deleted file mode 100644 index 47aa34bc04..0000000000 --- a/changelog.d/3771.misc +++ /dev/null @@ -1 +0,0 @@ -http/ is now ported to Python 3. diff --git a/changelog.d/3788.bugfix b/changelog.d/3788.bugfix deleted file mode 100644 index 72316fb881..0000000000 --- a/changelog.d/3788.bugfix +++ /dev/null @@ -1 +0,0 @@ -Remove connection ID for replication prometheus metrics, as it creates a large number of new series. diff --git a/changelog.d/3789.misc b/changelog.d/3789.misc deleted file mode 100644 index d2d5d91091..0000000000 --- a/changelog.d/3789.misc +++ /dev/null @@ -1 +0,0 @@ -Improve human readable error messages for threepid registration/account update diff --git a/changelog.d/3790.feature b/changelog.d/3790.feature deleted file mode 100644 index 2c4ac62fb5..0000000000 --- a/changelog.d/3790.feature +++ /dev/null @@ -1 +0,0 @@ -Implement `event_format` filter param in `/sync` diff --git a/changelog.d/3795.misc b/changelog.d/3795.misc deleted file mode 100644 index 9f64ee5e2b..0000000000 --- a/changelog.d/3795.misc +++ /dev/null @@ -1 +0,0 @@ -Make /sync slightly faster by avoiding needless copies diff --git a/changelog.d/3800.bugfix b/changelog.d/3800.bugfix deleted file mode 100644 index 6b2e18b4a6..0000000000 --- a/changelog.d/3800.bugfix +++ /dev/null @@ -1 +0,0 @@ -guest users should not be part of mau total diff --git a/changelog.d/3803.misc b/changelog.d/3803.misc deleted file mode 100644 index 2b60653c29..0000000000 --- a/changelog.d/3803.misc +++ /dev/null @@ -1 +0,0 @@ -handlers/ is now ported to Python 3. diff --git a/changelog.d/3804.bugfix b/changelog.d/3804.bugfix deleted file mode 100644 index a0cef20e3f..0000000000 --- a/changelog.d/3804.bugfix +++ /dev/null @@ -1 +0,0 @@ -Bump dependency on pyopenssl 16.x, to avoid incompatibility with recent Twisted. 
diff --git a/changelog.d/3805.misc b/changelog.d/3805.misc deleted file mode 100644 index 257feeb071..0000000000 --- a/changelog.d/3805.misc +++ /dev/null @@ -1 +0,0 @@ -Limit the number of PDUs/EDUs per federation transaction diff --git a/changelog.d/3806.misc b/changelog.d/3806.misc deleted file mode 100644 index 3c722eef2d..0000000000 --- a/changelog.d/3806.misc +++ /dev/null @@ -1 +0,0 @@ -Only start postgres instance for postgres tests on Travis CI diff --git a/changelog.d/3808.misc b/changelog.d/3808.misc deleted file mode 100644 index e5e1cd9e0e..0000000000 --- a/changelog.d/3808.misc +++ /dev/null @@ -1 +0,0 @@ -tests/ is now ported to Python 3. diff --git a/changelog.d/3810.bugfix b/changelog.d/3810.bugfix deleted file mode 100644 index 2b938a81ae..0000000000 --- a/changelog.d/3810.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix existing room tags not coming down sync when joining a room diff --git a/changelog.d/3822.misc b/changelog.d/3822.misc deleted file mode 100644 index 5250f31896..0000000000 --- a/changelog.d/3822.misc +++ /dev/null @@ -1 +0,0 @@ -crypto/ is now ported to Python 3. diff --git a/changelog.d/3823.misc b/changelog.d/3823.misc deleted file mode 100644 index 0da491ddaa..0000000000 --- a/changelog.d/3823.misc +++ /dev/null @@ -1 +0,0 @@ -rest/ is now ported to Python 3. diff --git a/changelog.d/3824.bugfix b/changelog.d/3824.bugfix deleted file mode 100644 index 99f199dcc6..0000000000 --- a/changelog.d/3824.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix jwt import check \ No newline at end of file diff --git a/changelog.d/3826.misc b/changelog.d/3826.misc deleted file mode 100644 index a4d9a012f9..0000000000 --- a/changelog.d/3826.misc +++ /dev/null @@ -1 +0,0 @@ -add some logging for the keyring queue diff --git a/changelog.d/3827.misc b/changelog.d/3827.misc deleted file mode 100644 index bc294706cf..0000000000 --- a/changelog.d/3827.misc +++ /dev/null @@ -1 +0,0 @@ -speed up lazy loading by 2-3x diff --git a/changelog.d/3834.misc b/changelog.d/3834.misc deleted file mode 100644 index 8902f8fba7..0000000000 --- a/changelog.d/3834.misc +++ /dev/null @@ -1 +0,0 @@ -Improved Dockerfile to remove build requirements after building reducing the image size. diff --git a/changelog.d/3835.bugfix b/changelog.d/3835.bugfix deleted file mode 100644 index 00dbcbc8dc..0000000000 --- a/changelog.d/3835.bugfix +++ /dev/null @@ -1 +0,0 @@ -fix VOIP crashes under Python 3 (#3821) diff --git a/changelog.d/3840.misc b/changelog.d/3840.misc deleted file mode 100644 index b9585ae9be..0000000000 --- a/changelog.d/3840.misc +++ /dev/null @@ -1 +0,0 @@ -Disable lazy loading for incremental syncs for now diff --git a/changelog.d/3841.bugfix b/changelog.d/3841.bugfix deleted file mode 100644 index 2a48a7dd66..0000000000 --- a/changelog.d/3841.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix manhole so that it works with latest openssh clients diff --git a/changelog.d/3845.bugfix b/changelog.d/3845.bugfix deleted file mode 100644 index 5b7e8f1934..0000000000 --- a/changelog.d/3845.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix outbound requests occasionally wedging, which can result in federation breaking between servers. 
diff --git a/changelog.d/3846.feature b/changelog.d/3846.feature deleted file mode 100644 index 453c11d3f8..0000000000 --- a/changelog.d/3846.feature +++ /dev/null @@ -1 +0,0 @@ -Add synapse_admin_mau:registered_reserved_users metric to expose number of real reaserved users diff --git a/changelog.d/3847.misc b/changelog.d/3847.misc deleted file mode 100644 index bf8b5afea4..0000000000 --- a/changelog.d/3847.misc +++ /dev/null @@ -1 +0,0 @@ -federation/ is now ported to Python 3. \ No newline at end of file diff --git a/changelog.d/3851.bugfix b/changelog.d/3851.bugfix deleted file mode 100644 index b53a9efe7b..0000000000 --- a/changelog.d/3851.bugfix +++ /dev/null @@ -1 +0,0 @@ -Show heroes if room name/canonical alias has been deleted diff --git a/changelog.d/3853.misc b/changelog.d/3853.misc deleted file mode 100644 index db45d4983d..0000000000 --- a/changelog.d/3853.misc +++ /dev/null @@ -1 +0,0 @@ -Log when we retry outbound requests diff --git a/changelog.d/3855.misc b/changelog.d/3855.misc deleted file mode 100644 index a25bb020ba..0000000000 --- a/changelog.d/3855.misc +++ /dev/null @@ -1 +0,0 @@ -Removed some excess logging messages. \ No newline at end of file diff --git a/changelog.d/3856.misc b/changelog.d/3856.misc deleted file mode 100644 index 36c311eb3d..0000000000 --- a/changelog.d/3856.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up purge history for rooms that have been previously purged diff --git a/changelog.d/3857.misc b/changelog.d/3857.misc deleted file mode 100644 index e128d193d9..0000000000 --- a/changelog.d/3857.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor some HTTP timeout code. \ No newline at end of file diff --git a/changelog.d/3858.misc b/changelog.d/3858.misc deleted file mode 100644 index 4644db5330..0000000000 --- a/changelog.d/3858.misc +++ /dev/null @@ -1 +0,0 @@ -Fix running merged builds on CircleCI \ No newline at end of file diff --git a/changelog.d/3859.bugfix b/changelog.d/3859.bugfix deleted file mode 100644 index ec5b172464..0000000000 --- a/changelog.d/3859.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix handling of redacted events from federation diff --git a/changelog.d/3860.misc b/changelog.d/3860.misc deleted file mode 100644 index 364239d3e3..0000000000 --- a/changelog.d/3860.misc +++ /dev/null @@ -1 +0,0 @@ -Fix typo in replication stream exception. diff --git a/changelog.d/3871.misc b/changelog.d/3871.misc deleted file mode 100644 index dd9510ceb6..0000000000 --- a/changelog.d/3871.misc +++ /dev/null @@ -1 +0,0 @@ -Add in flight real time metrics for Measure blocks diff --git a/changelog.d/3872.misc b/changelog.d/3872.misc deleted file mode 100644 index b450c506d8..0000000000 --- a/changelog.d/3872.misc +++ /dev/null @@ -1 +0,0 @@ -Disable buffering and automatic retrying in treq requests to prevent timeouts. 
\ No newline at end of file diff --git a/changelog.d/3874.bugfix b/changelog.d/3874.bugfix deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/changelog.d/3875.bugfix b/changelog.d/3875.bugfix deleted file mode 100644 index 2d2147dd4b..0000000000 --- a/changelog.d/3875.bugfix +++ /dev/null @@ -1 +0,0 @@ -Mitigate outbound federation randomly becoming wedged diff --git a/changelog.d/3877.misc b/changelog.d/3877.misc deleted file mode 100644 index a80fec4bd8..0000000000 --- a/changelog.d/3877.misc +++ /dev/null @@ -1 +0,0 @@ -mention jemalloc in the README diff --git a/changelog.d/3888.misc b/changelog.d/3888.misc deleted file mode 100644 index a10ede547e..0000000000 --- a/changelog.d/3888.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unmaintained "nuke-room-from-db.sh" script -- cgit 1.4.1 From 2b39494cd5a0a93811024c31d8c41e7b5e9e21a3 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Mon, 17 Sep 2018 16:35:18 +0100 Subject: Add python_version phone home stat --- synapse/app/homeserver.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index ac97e19649..3d9d303839 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -457,6 +457,8 @@ def run(hs): stats["homeserver"] = hs.config.server_name stats["timestamp"] = now stats["uptime_seconds"] = uptime + version = sys.version_info + stats["python_version"] = (version.major) + (version.minor / 10) + (version.micro / 100) stats["total_users"] = yield hs.get_datastore().count_all_users() total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users() -- cgit 1.4.1 From f75b9961c6c782d2ca4586782aa5cd2c0ae9a5b2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 17 Sep 2018 16:52:02 +0100 Subject: Reinstate missing null check --- synapse/handlers/directory.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index c745e6740b..18741c5fac 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -333,7 +333,7 @@ class DirectoryHandler(BaseHandler): def _user_can_delete_alias(self, alias, user_id): creator = yield self.store.get_room_alias_creator(alias.to_string()) - if creator == user_id: + if creator is not None and creator == user_id: defer.returnValue(True) is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id)) -- cgit 1.4.1 From 9a1cceeca90e6ae52433d50dfd866d68836f686e Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Mon, 17 Sep 2018 17:09:06 +0100 Subject: Use a string for versions --- synapse/app/homeserver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 3d9d303839..be9be423f0 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -458,7 +458,7 @@ def run(hs): stats["timestamp"] = now stats["uptime_seconds"] = uptime version = sys.version_info - stats["python_version"] = (version.major) + (version.minor / 10) + (version.micro / 100) + stats["python_version"] = "{}.{}.{}".format(version.major, version.minor, version.micro) stats["total_users"] = yield hs.get_datastore().count_all_users() total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users() -- cgit 1.4.1 From ac80cb08fe247812b46c70843b2d5c9764cca619 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 17 Sep 2018 17:16:50 +0100 Subject: Fix more b'abcd' noise in metrics --- synapse/http/request_metrics.py | 22 +++++++++++----------- 
synapse/http/site.py | 2 +- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 72c2654678..fedb4e6b18 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -162,7 +162,7 @@ class RequestMetrics(object): with _in_flight_requests_lock: _in_flight_requests.add(self) - def stop(self, time_sec, request): + def stop(self, time_sec, response_code, sent_bytes): with _in_flight_requests_lock: _in_flight_requests.discard(self) @@ -179,35 +179,35 @@ class RequestMetrics(object): ) return - response_code = str(request.code) + response_code = str(response_code) - outgoing_responses_counter.labels(request.method, response_code).inc() + outgoing_responses_counter.labels(self.method, response_code).inc() - response_count.labels(request.method, self.name, tag).inc() + response_count.labels(self.method, self.name, tag).inc() - response_timer.labels(request.method, self.name, tag, response_code).observe( + response_timer.labels(self.method, self.name, tag, response_code).observe( time_sec - self.start ) resource_usage = context.get_resource_usage() - response_ru_utime.labels(request.method, self.name, tag).inc( + response_ru_utime.labels(self.method, self.name, tag).inc( resource_usage.ru_utime, ) - response_ru_stime.labels(request.method, self.name, tag).inc( + response_ru_stime.labels(self.method, self.name, tag).inc( resource_usage.ru_stime, ) - response_db_txn_count.labels(request.method, self.name, tag).inc( + response_db_txn_count.labels(self.method, self.name, tag).inc( resource_usage.db_txn_count ) - response_db_txn_duration.labels(request.method, self.name, tag).inc( + response_db_txn_duration.labels(self.method, self.name, tag).inc( resource_usage.db_txn_duration_sec ) - response_db_sched_duration.labels(request.method, self.name, tag).inc( + response_db_sched_duration.labels(self.method, self.name, tag).inc( resource_usage.db_sched_duration_sec ) - response_size.labels(request.method, self.name, tag).inc(request.sentLength) + response_size.labels(self.method, self.name, tag).inc(sent_bytes) # We always call this at the end to ensure that we update the metrics # regardless of whether a call to /metrics while the request was in diff --git a/synapse/http/site.py b/synapse/http/site.py index 2191de9836..9579e8cd0d 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -288,7 +288,7 @@ class SynapseRequest(Request): ) try: - self.request_metrics.stop(self.finish_time, self) + self.request_metrics.stop(self.finish_time, self.code, self.sentLength) except Exception as e: logger.warn("Failed to stop metrics: %r", e) -- cgit 1.4.1 From 06f2dbbb5dbe7cf9efe66376f47008e86a180f3b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 17 Sep 2018 17:18:26 +0100 Subject: changelog --- changelog.d/3895.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3895.bugfix diff --git a/changelog.d/3895.bugfix b/changelog.d/3895.bugfix new file mode 100644 index 0000000000..8b30afab04 --- /dev/null +++ b/changelog.d/3895.bugfix @@ -0,0 +1 @@ +Fix some b'abcd' noise in logs and metrics -- cgit 1.4.1 From b58714789fa21fb6907d8c46d3155472d09bc8e6 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Mon, 17 Sep 2018 17:35:54 +0100 Subject: make pip happy? 
--- synapse/app/homeserver.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index be9be423f0..ea57350d46 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -71,7 +71,7 @@ from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.module_loader import load_module from synapse.util.rlimit import change_resource_limit -from synapse.util.versionstring import get_version_string +from synapse.util.\\\\string import get_version_string logger = logging.getLogger("synapse.app.homeserver") @@ -458,7 +458,9 @@ def run(hs): stats["timestamp"] = now stats["uptime_seconds"] = uptime version = sys.version_info - stats["python_version"] = "{}.{}.{}".format(version.major, version.minor, version.micro) + stats["python_version"] = "{}.{}.{}".format( + version.major, version.minor, version.micro + ) stats["total_users"] = yield hs.get_datastore().count_all_users() total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users() -- cgit 1.4.1 From 5baa087312f056fa3730581d66f220f49eed8fdd Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Mon, 17 Sep 2018 17:37:56 +0100 Subject: typo --- synapse/app/homeserver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index ea57350d46..3241ded188 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -71,7 +71,7 @@ from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.module_loader import load_module from synapse.util.rlimit import change_resource_limit -from synapse.util.\\\\string import get_version_string +from synapse.util.versionstring import get_version_string logger = logging.getLogger("synapse.app.homeserver") -- cgit 1.4.1 From 2b41f2ed76a0dca6bc6f578eba9eb6ad011c4cd4 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Mon, 17 Sep 2018 17:48:48 +0100 Subject: Create 3894.feature --- changelog.d/3894.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3894.feature diff --git a/changelog.d/3894.feature b/changelog.d/3894.feature new file mode 100644 index 0000000000..1ed0cccdb2 --- /dev/null +++ b/changelog.d/3894.feature @@ -0,0 +1 @@ +Report "python_version" in the phone home stats -- cgit 1.4.1 From 4ecdf73fc7a9e1b8357c6f74897b33b32243dd3f Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Mon, 17 Sep 2018 22:05:23 -0500 Subject: Fix typo in README, synaspse -> synapse Signed-off-by: Aaron Raimist --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 0147429aed..9c0f9c09c8 100644 --- a/README.rst +++ b/README.rst @@ -968,7 +968,7 @@ improvement in overall amount, and especially in terms of giving back RAM to the OS. To use it, the library must simply be put in the LD_PRELOAD environment variable when launching Synapse. 
On Debian, this can be done by installing the ``libjemalloc1`` package and adding this line to -``/etc/default/matrix-synaspse``:: +``/etc/default/matrix-synapse``:: LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1 -- cgit 1.4.1 From 505abb38f04b8b18adb0b5e5a150238f76b3e4fe Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Mon, 17 Sep 2018 22:07:19 -0500 Subject: Add changelog Signed-off-by: Aaron Raimist --- changelog.d/3897.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3897.misc diff --git a/changelog.d/3897.misc b/changelog.d/3897.misc new file mode 100644 index 0000000000..87e7ac796e --- /dev/null +++ b/changelog.d/3897.misc @@ -0,0 +1 @@ +Fix typo in README, synaspse -> synapse \ No newline at end of file -- cgit 1.4.1 From 550007cb0eb12c7ddf0510afdb23195c295de396 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 18 Sep 2018 15:02:51 +0100 Subject: Bump timeout on get_missing_events request --- synapse/handlers/federation.py | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 0c68e8a472..f10b46414b 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -360,6 +360,35 @@ class FederationHandler(BaseHandler): # apparently. # # see https://github.com/matrix-org/synapse/pull/1744 + # + # ---- + # + # Update richvdh 2018/09/18: There are a number of problems with timing this + # request out aggressively on the client side: + # + # - it plays badly with the server-side rate-limiter, which starts tarpitting you + # if you send too many requests at once, so you end up with the server carefully + # working through the backlog of your requests, which you have already timed + # out. + # + # - for this request in particular, we now (as of + # https://github.com/matrix-org/synapse/pull/3456) reject any PDUs where the + # server can't produce a plausible-looking set of prev_events - so we become + # much more likely to reject the event. + # + # - contrary to what it says above, we do *not* fall back to fetching fresh state + # for the room if get_missing_events times out. Rather, we give up processing + # the PDU whose prevs we are missing, which then makes it much more likely that + # we'll end up back here for the *next* PDU in the list, which exacerbates the + # problem. + # + # - the aggressive 10s timeout was introduced to deal with incoming federation + # requests taking 8 hours to process. It's not entirely clear why that was going + # on; certainly there were other issues causing traffic storms which are now + # resolved, and I think in any case we may be more sensible about our locking + # now. We're *certainly* more sensible about our logging. + # + # All that said: Let's try increasing the timeout to 60s and see what happens.
missing_events = yield self.federation_client.get_missing_events( origin, @@ -368,7 +397,7 @@ class FederationHandler(BaseHandler): latest_events=[pdu], limit=10, min_depth=min_depth, - timeout=10000, + timeout=60000, ) logger.info( -- cgit 1.4.1 From 8565d9a9fe26d445caaa9ab22d326badc6b0150b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 18 Sep 2018 15:05:26 +0100 Subject: changelog --- changelog.d/3903.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3903.misc diff --git a/changelog.d/3903.misc b/changelog.d/3903.misc new file mode 100644 index 0000000000..49b64bf333 --- /dev/null +++ b/changelog.d/3903.misc @@ -0,0 +1 @@ +Increase the timeout when filling missing events in federation requests \ No newline at end of file -- cgit 1.4.1 From b3097396e7cf8d29255b4855e9bf06c6b98a549e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 18 Sep 2018 17:01:12 +0100 Subject: Set SNI to the server_name, not whatever was in the SRV record Fixes #3843 --- synapse/http/endpoint.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/synapse/http/endpoint.py b/synapse/http/endpoint.py index b0c9369519..91025037a3 100644 --- a/synapse/http/endpoint.py +++ b/synapse/http/endpoint.py @@ -108,7 +108,7 @@ def matrix_federation_endpoint(reactor, destination, tls_client_options_factory= Args: reactor: Twisted reactor. - destination (bytes): The name of the server to connect to. + destination (unicode): The name of the server to connect to. tls_client_options_factory (synapse.crypto.context_factory.ClientTLSOptionsFactory): Factory which generates TLS options for client connections. @@ -126,10 +126,17 @@ def matrix_federation_endpoint(reactor, destination, tls_client_options_factory= transport_endpoint = HostnameEndpoint default_port = 8008 else: + # the SNI string should be the same as the Host header, minus the port. + # as per https://github.com/matrix-org/synapse/issues/2525#issuecomment-336896777, + # the Host header and SNI should therefore be the server_name of the remote + # server. + tls_options = tls_client_options_factory.get_options(domain) + def transport_endpoint(reactor, host, port, timeout): return wrapClientTLS( - tls_client_options_factory.get_options(host), - HostnameEndpoint(reactor, host, port, timeout=timeout)) + tls_options, + HostnameEndpoint(reactor, host, port, timeout=timeout), + ) default_port = 8448 if port is None: -- cgit 1.4.1 From edabc18938c63a7c6ed16840e22be25a243dcff2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 18 Sep 2018 17:04:20 +0100 Subject: changelog --- changelog.d/3907.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3907.bugfix diff --git a/changelog.d/3907.bugfix b/changelog.d/3907.bugfix new file mode 100644 index 0000000000..45e010c052 --- /dev/null +++ b/changelog.d/3907.bugfix @@ -0,0 +1 @@ +Fix incorrect server-name indication for outgoing federation requests \ No newline at end of file -- cgit 1.4.1 From 31c15dcb80c8f11fd03dbb9b0ccff4777dc8e457 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 18 Sep 2018 18:17:15 +0100 Subject: Refactor matrixfederationclient to fix logging (#3906) We want to wait until we have read the response body before we log the request as complete, otherwise a confusing thing happens where the request appears to have completed, but we later fail it. 
To do this, we factor the salient details of a request out to a separate object, which can then keep track of the txn_id, so that it can be logged. --- changelog.d/3906.misc | 1 + synapse/http/matrixfederationclient.py | 385 +++++++++++++++++++------------ tests/http/test_fedclient.py | 43 +++- tests/replication/slave/storage/_base.py | 35 +-- tests/server.py | 81 +++++++ 5 files changed, 359 insertions(+), 186 deletions(-) create mode 100644 changelog.d/3906.misc diff --git a/changelog.d/3906.misc b/changelog.d/3906.misc new file mode 100644 index 0000000000..11709186d3 --- /dev/null +++ b/changelog.d/3906.misc @@ -0,0 +1 @@ +Improve logging of outbound federation requests \ No newline at end of file diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 083484a687..6a2d447289 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -17,10 +17,12 @@ import cgi import logging import random import sys +from io import BytesIO from six import PY3, string_types from six.moves import urllib +import attr import treq from canonicaljson import encode_canonical_json from prometheus_client import Counter @@ -28,8 +30,9 @@ from signedjson.sign import sign_json from twisted.internet import defer, protocol from twisted.internet.error import DNSLookupError +from twisted.internet.task import _EPSILON, Cooperator from twisted.web._newclient import ResponseDone -from twisted.web.client import Agent, HTTPConnectionPool +from twisted.web.client import Agent, FileBodyProducer, HTTPConnectionPool from twisted.web.http_headers import Headers import synapse.metrics @@ -41,13 +44,11 @@ from synapse.api.errors import ( SynapseError, ) from synapse.http.endpoint import matrix_federation_endpoint -from synapse.util import logcontext from synapse.util.async_helpers import timeout_no_seriously from synapse.util.logcontext import make_deferred_yieldable from synapse.util.metrics import Measure logger = logging.getLogger(__name__) -outbound_logger = logging.getLogger("synapse.http.outbound") outgoing_requests_counter = Counter("synapse_http_matrixfederationclient_requests", "", ["method"]) @@ -78,6 +79,93 @@ class MatrixFederationEndpointFactory(object): ) +_next_id = 1 + + +@attr.s +class MatrixFederationRequest(object): + method = attr.ib() + """HTTP method + :type: str + """ + + path = attr.ib() + """HTTP path + :type: str + """ + + destination = attr.ib() + """The remote server to send the HTTP request to. + :type: str""" + + json = attr.ib(default=None) + """JSON to send in the body. + :type: dict|None + """ + + json_callback = attr.ib(default=None) + """A callback to generate the JSON. + :type: func|None + """ + + query = attr.ib(default=None) + """Query arguments. 
+ :type: dict|None + """ + + txn_id = attr.ib(default=None) + """Unique ID for this request (for logging) + :type: str|None + """ + + def __attrs_post_init__(self): + global _next_id + self.txn_id = "%s-O-%s" % (self.method, _next_id) + _next_id = (_next_id + 1) % (MAXINT - 1) + + def get_json(self): + if self.json_callback: + return self.json_callback() + return self.json + + +@defer.inlineCallbacks +def _handle_json_response(reactor, timeout_sec, request, response): + """ + Reads the JSON body of a response, with a timeout + + Args: + reactor (IReactor): twisted reactor, for the timeout + timeout_sec (float): number of seconds to wait for response to complete + request (MatrixFederationRequest): the request that triggered the response + response (IResponse): response to the request + + Returns: + dict: parsed JSON response + """ + try: + check_content_type_is_json(response.headers) + d = treq.json_content(response) + d.addTimeout(timeout_sec, reactor) + body = yield make_deferred_yieldable(d) + except Exception as e: + logger.warn( + "{%s} [%d] Error reading response: %s", + request.txn_id, + request.destination, + e, + ) + raise + logger.info( + "{%s} [%d] Completed: %d %s", + request.txn_id, + request.destination, + response.code, + response.phrase.decode('ascii', errors='replace'), + ) + defer.returnValue(body) + + class MatrixFederationHttpClient(object): """HTTP client used to talk to other homeservers over the federation protocol. Send client certificates and signs requests. @@ -102,34 +190,35 @@ class MatrixFederationHttpClient(object): self.clock = hs.get_clock() self._store = hs.get_datastore() self.version_string = hs.version_string.encode('ascii') - self._next_id = 1 self.default_timeout = 60 - def _create_url(self, destination, path_bytes, param_bytes, query_bytes): - return urllib.parse.urlunparse( - (b"matrix", destination, path_bytes, param_bytes, query_bytes, b"") - ) + def schedule(x): + reactor.callLater(_EPSILON, x) + + self._cooperator = Cooperator(scheduler=schedule) @defer.inlineCallbacks - def _request(self, destination, method, path, - json=None, json_callback=None, - param_bytes=b"", - query=None, retry_on_dns_fail=True, - timeout=None, long_retries=False, - ignore_backoff=False, - backoff_on_404=False): + def _send_request( + self, + request, + retry_on_dns_fail=True, + timeout=None, + long_retries=False, + ignore_backoff=False, + backoff_on_404=False + ): """ - Creates and sends a request to the given server. + Sends a request to the given server. Args: - destination (str): The remote server to send the HTTP request to. - method (str): HTTP method - path (str): The HTTP path - json (dict or None): JSON to send in the body. - json_callback (func or None): A callback to generate the JSON. - query (dict or None): Query arguments. + request (MatrixFederationRequest): details of request to be sent + + timeout (int|None): number of milliseconds to wait for the response headers + (including connecting to the server). 60s by default. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. 
+ backoff_on_404 (bool): Back off if we get a 404 Returns: @@ -154,38 +243,32 @@ class MatrixFederationHttpClient(object): if ( self.hs.config.federation_domain_whitelist is not None and - destination not in self.hs.config.federation_domain_whitelist + request.destination not in self.hs.config.federation_domain_whitelist ): - raise FederationDeniedError(destination) + raise FederationDeniedError(request.destination) limiter = yield synapse.util.retryutils.get_retry_limiter( - destination, + request.destination, self.clock, self._store, backoff_on_404=backoff_on_404, ignore_backoff=ignore_backoff, ) - headers_dict = {} - path_bytes = path.encode("ascii") - if query: - query_bytes = encode_query_args(query) + method = request.method + destination = request.destination + path_bytes = request.path.encode("ascii") + if request.query: + query_bytes = encode_query_args(request.query) else: query_bytes = b"" headers_dict = { "User-Agent": [self.version_string], - "Host": [destination], + "Host": [request.destination], } with limiter: - url = self._create_url( - destination.encode("ascii"), path_bytes, param_bytes, query_bytes - ).decode('ascii') - - txn_id = "%s-O-%s" % (method, self._next_id) - self._next_id = (self._next_id + 1) % (MAXINT - 1) - # XXX: Would be much nicer to retry only at the transaction-layer # (once we have reliable transactions in place) if long_retries: @@ -193,16 +276,19 @@ class MatrixFederationHttpClient(object): else: retries_left = MAX_SHORT_RETRIES - http_url = urllib.parse.urlunparse( - (b"", b"", path_bytes, param_bytes, query_bytes, b"") - ).decode('ascii') + url = urllib.parse.urlunparse(( + b"matrix", destination.encode("ascii"), + path_bytes, None, query_bytes, b"", + )).decode('ascii') + + http_url = urllib.parse.urlunparse(( + b"", b"", + path_bytes, None, query_bytes, b"", + )).decode('ascii') - log_result = None while True: try: - if json_callback: - json = json_callback() - + json = request.get_json() if json: data = encode_canonical_json(json) headers_dict["Content-Type"] = ["application/json"] @@ -213,16 +299,24 @@ class MatrixFederationHttpClient(object): data = None self.sign_request(destination, method, http_url, headers_dict) - outbound_logger.info( + logger.info( "{%s} [%s] Sending request: %s %s", - txn_id, destination, method, url + request.txn_id, destination, method, url ) + if data: + producer = FileBodyProducer( + BytesIO(data), + cooperator=self._cooperator + ) + else: + producer = None + request_deferred = treq.request( method, url, headers=Headers(headers_dict), - data=data, + data=producer, agent=self.agent, reactor=self.hs.get_reactor(), unbuffered=True @@ -244,33 +338,19 @@ class MatrixFederationHttpClient(object): request_deferred, ) - log_result = "%d %s" % ( - response.code, - response.phrase.decode('ascii', errors='replace'), - ) break except Exception as e: - if not retry_on_dns_fail and isinstance(e, DNSLookupError): - logger.warn( - "DNS Lookup failed to %s with %s", - destination, - e - ) - log_result = "DNS Lookup failed to %s with %s" % ( - destination, e - ) - raise - logger.warn( - "{%s} Sending request failed to %s: %s %s: %s", - txn_id, + "{%s} [%s] Request failed: %s %s: %s", + request.txn_id, destination, method, url, _flatten_response_never_received(e), ) - log_result = _flatten_response_never_received(e) + if not retry_on_dns_fail and isinstance(e, DNSLookupError): + raise if retries_left and not timeout: if long_retries: @@ -283,33 +363,33 @@ class MatrixFederationHttpClient(object): delay *= random.uniform(0.8, 1.4) 
logger.debug( - "{%s} Waiting %s before sending to %s...", - txn_id, + "{%s} [%s] Waiting %ss before re-sending...", + request.txn_id, + destination, delay, - destination ) yield self.clock.sleep(delay) retries_left -= 1 else: raise - finally: - outbound_logger.info( - "{%s} [%s] Result: %s", - txn_id, - destination, - log_result, - ) + + logger.info( + "{%s} [%s] Got response headers: %d %s", + request.txn_id, + destination, + response.code, + response.phrase.decode('ascii', errors='replace'), + ) if 200 <= response.code < 300: pass else: # :'( # Update transactions table? - with logcontext.PreserveLoggingContext(): - d = treq.content(response) - d.addTimeout(_sec_timeout, self.hs.get_reactor()) - body = yield make_deferred_yieldable(d) + d = treq.content(response) + d.addTimeout(_sec_timeout, self.hs.get_reactor()) + body = yield make_deferred_yieldable(d) raise HttpResponseException( response.code, response.phrase, body ) @@ -403,29 +483,26 @@ class MatrixFederationHttpClient(object): is not on our federation whitelist """ - if not json_data_callback: - json_data_callback = lambda: data - - response = yield self._request( - destination, - "PUT", - path, - json_callback=json_data_callback, + request = MatrixFederationRequest( + method="PUT", + destination=destination, + path=path, query=args, + json_callback=json_data_callback, + json=data, + ) + + response = yield self._send_request( + request, long_retries=long_retries, timeout=timeout, ignore_backoff=ignore_backoff, backoff_on_404=backoff_on_404, ) - if 200 <= response.code < 300: - # We need to update the transactions table to say it was sent? - check_content_type_is_json(response.headers) - - with logcontext.PreserveLoggingContext(): - d = treq.json_content(response) - d.addTimeout(self.default_timeout, self.hs.get_reactor()) - body = yield make_deferred_yieldable(d) + body = yield _handle_json_response( + self.hs.get_reactor(), self.default_timeout, request, response, + ) defer.returnValue(body) @defer.inlineCallbacks @@ -459,31 +536,30 @@ class MatrixFederationHttpClient(object): Fails with ``FederationDeniedError`` if this destination is not on our federation whitelist """ - response = yield self._request( - destination, - "POST", - path, + + request = MatrixFederationRequest( + method="POST", + destination=destination, + path=path, query=args, json=data, + ) + + response = yield self._send_request( + request, long_retries=long_retries, timeout=timeout, ignore_backoff=ignore_backoff, ) - if 200 <= response.code < 300: - # We need to update the transactions table to say it was sent? 
- check_content_type_is_json(response.headers) - - with logcontext.PreserveLoggingContext(): - d = treq.json_content(response) - if timeout: - _sec_timeout = timeout / 1000 - else: - _sec_timeout = self.default_timeout - - d.addTimeout(_sec_timeout, self.hs.get_reactor()) - body = yield make_deferred_yieldable(d) + if timeout: + _sec_timeout = timeout / 1000 + else: + _sec_timeout = self.default_timeout + body = yield _handle_json_response( + self.hs.get_reactor(), _sec_timeout, request, response, + ) defer.returnValue(body) @defer.inlineCallbacks @@ -519,25 +595,23 @@ class MatrixFederationHttpClient(object): logger.debug("Query bytes: %s Retry DNS: %s", args, retry_on_dns_fail) - response = yield self._request( - destination, - "GET", - path, + request = MatrixFederationRequest( + method="GET", + destination=destination, + path=path, query=args, + ) + + response = yield self._send_request( + request, retry_on_dns_fail=retry_on_dns_fail, timeout=timeout, ignore_backoff=ignore_backoff, ) - if 200 <= response.code < 300: - # We need to update the transactions table to say it was sent? - check_content_type_is_json(response.headers) - - with logcontext.PreserveLoggingContext(): - d = treq.json_content(response) - d.addTimeout(self.default_timeout, self.hs.get_reactor()) - body = yield make_deferred_yieldable(d) - + body = yield _handle_json_response( + self.hs.get_reactor(), self.default_timeout, request, response, + ) defer.returnValue(body) @defer.inlineCallbacks @@ -568,25 +642,23 @@ class MatrixFederationHttpClient(object): Fails with ``FederationDeniedError`` if this destination is not on our federation whitelist """ - response = yield self._request( - destination, - "DELETE", - path, + request = MatrixFederationRequest( + method="DELETE", + destination=destination, + path=path, query=args, + ) + + response = yield self._send_request( + request, long_retries=long_retries, timeout=timeout, ignore_backoff=ignore_backoff, ) - if 200 <= response.code < 300: - # We need to update the transactions table to say it was sent? 
- check_content_type_is_json(response.headers) - - with logcontext.PreserveLoggingContext(): - d = treq.json_content(response) - d.addTimeout(self.default_timeout, self.hs.get_reactor()) - body = yield make_deferred_yieldable(d) - + body = yield _handle_json_response( + self.hs.get_reactor(), self.default_timeout, request, response, + ) defer.returnValue(body) @defer.inlineCallbacks @@ -614,11 +686,15 @@ class MatrixFederationHttpClient(object): Fails with ``FederationDeniedError`` if this destination is not on our federation whitelist """ - response = yield self._request( - destination, - "GET", - path, + request = MatrixFederationRequest( + method="GET", + destination=destination, + path=path, query=args, + ) + + response = yield self._send_request( + request, retry_on_dns_fail=retry_on_dns_fail, ignore_backoff=ignore_backoff, ) @@ -626,14 +702,25 @@ class MatrixFederationHttpClient(object): headers = dict(response.headers.getAllRawHeaders()) try: - with logcontext.PreserveLoggingContext(): - d = _readBodyToFile(response, output_stream, max_size) - d.addTimeout(self.default_timeout, self.hs.get_reactor()) - length = yield make_deferred_yieldable(d) - except Exception: - logger.exception("Failed to download body") + d = _readBodyToFile(response, output_stream, max_size) + d.addTimeout(self.default_timeout, self.hs.get_reactor()) + length = yield make_deferred_yieldable(d) + except Exception as e: + logger.warn( + "{%s} [%d] Error reading response: %s", + request.txn_id, + request.destination, + e, + ) raise - + logger.info( + "{%s} [%d] Completed: %d %s [%d bytes]", + request.txn_id, + request.destination, + response.code, + response.phrase.decode('ascii', errors='replace'), + length, + ) defer.returnValue((length, headers)) diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py index 1c46c9cfeb..66c09f63b6 100644 --- a/tests/http/test_fedclient.py +++ b/tests/http/test_fedclient.py @@ -18,9 +18,14 @@ from mock import Mock from twisted.internet.defer import TimeoutError from twisted.internet.error import ConnectingCancelledError, DNSLookupError from twisted.web.client import ResponseNeverReceived +from twisted.web.http import HTTPChannel -from synapse.http.matrixfederationclient import MatrixFederationHttpClient +from synapse.http.matrixfederationclient import ( + MatrixFederationHttpClient, + MatrixFederationRequest, +) +from tests.server import FakeTransport from tests.unittest import HomeserverTestCase @@ -40,7 +45,7 @@ class FederationClientTests(HomeserverTestCase): """ If the DNS raising returns an error, it will bubble up. """ - d = self.cl._request("testserv2:8008", "GET", "foo/bar", timeout=10000) + d = self.cl.get_json("testserv2:8008", "foo/bar", timeout=10000) self.pump() f = self.failureResultOf(d) @@ -51,7 +56,7 @@ class FederationClientTests(HomeserverTestCase): If the HTTP request is not connected and is timed out, it'll give a ConnectingCancelledError. """ - d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000) + d = self.cl.get_json("testserv:8008", "foo/bar", timeout=10000) self.pump() @@ -78,7 +83,7 @@ class FederationClientTests(HomeserverTestCase): If the HTTP request is connected, but gets no response before being timed out, it'll give a ResponseNeverReceived. 
""" - d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000) + d = self.cl.get_json("testserv:8008", "foo/bar", timeout=10000) self.pump() @@ -108,7 +113,12 @@ class FederationClientTests(HomeserverTestCase): """ Once the client gets the headers, _request returns successfully. """ - d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000) + request = MatrixFederationRequest( + method="GET", + destination="testserv:8008", + path="foo/bar", + ) + d = self.cl._send_request(request, timeout=10000) self.pump() @@ -155,3 +165,26 @@ class FederationClientTests(HomeserverTestCase): f = self.failureResultOf(d) self.assertIsInstance(f.value, TimeoutError) + + def test_client_sends_body(self): + self.cl.post_json( + "testserv:8008", "foo/bar", timeout=10000, + data={"a": "b"} + ) + + self.pump() + + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + client = clients[0][2].buildProtocol(None) + server = HTTPChannel() + + client.makeConnection(FakeTransport(server, self.reactor)) + server.makeConnection(FakeTransport(client, self.reactor)) + + self.pump(0.1) + + self.assertEqual(len(server.requests), 1) + request = server.requests[0] + content = request.content.read() + self.assertEqual(content, b'{"a":"b"}') diff --git a/tests/replication/slave/storage/_base.py b/tests/replication/slave/storage/_base.py index 089cecfbee..9e9fbbfe93 100644 --- a/tests/replication/slave/storage/_base.py +++ b/tests/replication/slave/storage/_base.py @@ -15,8 +15,6 @@ from mock import Mock, NonCallableMock -import attr - from synapse.replication.tcp.client import ( ReplicationClientFactory, ReplicationClientHandler, @@ -24,6 +22,7 @@ from synapse.replication.tcp.client import ( from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory from tests import unittest +from tests.server import FakeTransport class BaseSlavedStoreTestCase(unittest.HomeserverTestCase): @@ -56,36 +55,8 @@ class BaseSlavedStoreTestCase(unittest.HomeserverTestCase): server = server_factory.buildProtocol(None) client = client_factory.buildProtocol(None) - @attr.s - class FakeTransport(object): - - other = attr.ib() - disconnecting = False - buffer = attr.ib(default=b'') - - def registerProducer(self, producer, streaming): - - self.producer = producer - - def _produce(): - self.producer.resumeProducing() - reactor.callLater(0.1, _produce) - - reactor.callLater(0.0, _produce) - - def write(self, byt): - self.buffer = self.buffer + byt - - if getattr(self.other, "transport") is not None: - self.other.dataReceived(self.buffer) - self.buffer = b"" - - def writeSequence(self, seq): - for x in seq: - self.write(x) - - client.makeConnection(FakeTransport(server)) - server.makeConnection(FakeTransport(client)) + client.makeConnection(FakeTransport(server, reactor)) + server.makeConnection(FakeTransport(client, reactor)) def replicate(self): """Tell the master side of replication that something has happened, and then diff --git a/tests/server.py b/tests/server.py index 420ec4e088..ccea3baa55 100644 --- a/tests/server.py +++ b/tests/server.py @@ -280,3 +280,84 @@ def get_clock(): clock = ThreadedMemoryReactorClock() hs_clock = Clock(clock) return (clock, hs_clock) + + +@attr.s +class FakeTransport(object): + """ + A twisted.internet.interfaces.ITransport implementation which sends all its data + straight into an IProtocol object: it exists to connect two IProtocols together. 
+ + To use it, instantiate it with the receiving IProtocol, and then pass it to the + sending IProtocol's makeConnection method: + + server = HTTPChannel() + client.makeConnection(FakeTransport(server, self.reactor)) + + If you want bidirectional communication, you'll need two instances. + """ + + other = attr.ib() + """The Protocol object which will receive any data written to this transport. + + :type: twisted.internet.interfaces.IProtocol + """ + + _reactor = attr.ib() + """Test reactor + + :type: twisted.internet.interfaces.IReactorTime + """ + + disconnecting = False + buffer = attr.ib(default=b'') + producer = attr.ib(default=None) + + def getPeer(self): + return None + + def getHost(self): + return None + + def loseConnection(self): + self.disconnecting = True + + def abortConnection(self): + self.disconnecting = True + + def pauseProducing(self): + self.producer.pauseProducing() + + def unregisterProducer(self): + if not self.producer: + return + + self.producer = None + + def registerProducer(self, producer, streaming): + self.producer = producer + self.producerStreaming = streaming + + def _produce(): + d = self.producer.resumeProducing() + d.addCallback(lambda x: self._reactor.callLater(0.1, _produce)) + + if not streaming: + self._reactor.callLater(0.0, _produce) + + def write(self, byt): + self.buffer = self.buffer + byt + + def _write(): + if getattr(self.other, "transport") is not None: + self.other.dataReceived(self.buffer) + self.buffer = b"" + return + + self._reactor.callLater(0.0, _write) + + _write() + + def writeSequence(self, seq): + for x in seq: + self.write(x) -- cgit 1.4.1 From a219ce87263ad9be887cf039a04b4a1f06b7b0b8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 18 Sep 2018 18:27:37 +0100 Subject: Use directory server for room joins (#3899) When we do a join, always try the server we used for the alias lookup first. Fixes #2418 --- changelog.d/3899.bugfix | 1 + synapse/handlers/room_member.py | 5 +++++ 2 files changed, 6 insertions(+) create mode 100644 changelog.d/3899.bugfix diff --git a/changelog.d/3899.bugfix b/changelog.d/3899.bugfix new file mode 100644 index 0000000000..5120e3a823 --- /dev/null +++ b/changelog.d/3899.bugfix @@ -0,0 +1 @@ +When we join a room, always try the server we used for the alias lookup first, to avoid unresponsive and out-of-date servers. diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index f643619047..07fd3e82fc 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -583,6 +583,11 @@ class RoomMemberHandler(object): room_id = mapping["room_id"] servers = mapping["servers"] + # put the server which owns the alias at the front of the server list. 
+ if room_alias.domain in servers: + servers.remove(room_alias.domain) + servers.insert(0, room_alias.domain) + defer.returnValue((RoomID.from_string(room_id), servers)) @defer.inlineCallbacks -- cgit 1.4.1 From 35aec19f0ac88e7b16bc6bcc4f4e546b7fc89589 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Tue, 18 Sep 2018 15:26:08 -0600 Subject: Destination is a string --- synapse/http/matrixfederationclient.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 6a2d447289..530c0245a9 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -150,14 +150,14 @@ def _handle_json_response(reactor, timeout_sec, request, response): body = yield make_deferred_yieldable(d) except Exception as e: logger.warn( - "{%s} [%d] Error reading response: %s", + "{%s} [%s] Error reading response: %s", request.txn_id, request.destination, e, ) raise logger.info( - "{%s} [%d] Completed: %d %s", + "{%s} [%s] Completed: %d %s", request.txn_id, request.destination, response.code, @@ -707,14 +707,14 @@ class MatrixFederationHttpClient(object): length = yield make_deferred_yieldable(d) except Exception as e: logger.warn( - "{%s} [%d] Error reading response: %s", + "{%s} [%s] Error reading response: %s", request.txn_id, request.destination, e, ) raise logger.info( - "{%s} [%d] Completed: %d %s [%d bytes]", + "{%s} [%s] Completed: %d %s [%d bytes]", request.txn_id, request.destination, response.code, -- cgit 1.4.1 From 13b31a9bafcecac13b4c8699f054a1cb5aa67cdd Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Tue, 18 Sep 2018 15:30:25 -0600 Subject: Changelog --- changelog.d/3909.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3909.misc diff --git a/changelog.d/3909.misc b/changelog.d/3909.misc new file mode 100644 index 0000000000..a8d72dacad --- /dev/null +++ b/changelog.d/3909.misc @@ -0,0 +1 @@ +Fix log format error in matrixfederationclient.py -- cgit 1.4.1 From 3f0d8e6b09d33a026df3ef403e1c8a264637d71c Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 19 Sep 2018 18:14:30 +1000 Subject: Remove documentation referencing Cygwin (#3873) --- .gitignore | 1 + README.rst | 38 +++++++------------------------------- changelog.d/3873.misc | 2 ++ 3 files changed, 10 insertions(+), 31 deletions(-) create mode 100644 changelog.d/3873.misc diff --git a/.gitignore b/.gitignore index 1718185384..a725725235 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ .DS_Store _trial_temp/ +_trial_temp*/ logs/ dbs/ *.egg diff --git a/README.rst b/README.rst index 9c0f9c09c8..5547f617ba 100644 --- a/README.rst +++ b/README.rst @@ -157,7 +157,7 @@ if you prefer. In case of problems, please see the _`Troubleshooting` section below. -There is an official synapse image available at +There is an official synapse image available at https://hub.docker.com/r/matrixdotorg/synapse/tags/ which can be used with the docker-compose file available at `contrib/docker `_. Further information on this including configuration options is available in the README on @@ -459,37 +459,13 @@ https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix- Windows Install --------------- Synapse can be installed on Cygwin.
It requires the following Cygwin packages: - -- gcc -- git -- libffi-devel -- openssl (and openssl-devel, python-openssl) -- python -- python-setuptools - -The content repository requires additional packages and will be unable to process -uploads without them: - -- libjpeg8 -- libjpeg8-devel -- zlib - -If you choose to install Synapse without these packages, you will need to reinstall ``pillow`` for changes to be applied, e.g. ``pip uninstall pillow`` ``pip install -pillow --user`` - -Troubleshooting: - -- You may need to upgrade ``setuptools`` to get this to work correctly: - ``pip install setuptools --upgrade``. -- You may encounter errors indicating that ``ffi.h`` is missing, even with - ``libffi-devel`` installed. If you do, copy the ``.h`` files: - ``cp /usr/lib/libffi-3.0.13/include/*.h /usr/include`` -- You may need to install libsodium from source in order to install PyNacl. If - you do, you may need to create a symlink to ``libsodium.a`` so ``ld`` can find - it: ``ln -s /usr/local/lib/libsodium.a /usr/lib/libsodium.a`` +If you wish to run or develop Synapse on Windows, the Windows Subsystem For +Linux provides a Linux environment on Windows 10 which is capable of using the +Debian, Fedora, or source installation methods. More information about WSL can +be found at https://docs.microsoft.com/en-us/windows/wsl/install-win10 for +Windows 10 and https://docs.microsoft.com/en-us/windows/wsl/install-on-server +for Windows Server. Troubleshooting =============== diff --git a/changelog.d/3873.misc b/changelog.d/3873.misc new file mode 100644 index 0000000000..8104b5c085 --- /dev/null +++ b/changelog.d/3873.misc @@ -0,0 +1,2 @@ +Remove documentation regarding installation on Cygwin; the use of WSL is +recommended instead. -- cgit 1.4.1 From 05b9937cd77bd02dc8f38d91ffda3db982a2665f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 19 Sep 2018 09:17:54 +0100 Subject: update changelog for #3909 --- changelog.d/3909.misc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/3909.misc b/changelog.d/3909.misc index a8d72dacad..11709186d3 100644 --- a/changelog.d/3909.misc +++ b/changelog.d/3909.misc @@ -1 +1 @@ -Fix log format error in matrixfederationclient.py +Improve logging of outbound federation requests \ No newline at end of file -- cgit 1.4.1 From a334e1cacec5dd9c1a983415906d8ffea4a30134 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 19 Sep 2018 10:39:40 +0100 Subject: Update to use new timeout function everywhere. The existing deferred timeout helper function (and the one built into twisted) suffer from a bug when a deferred's canceller throws an exception, #3842. The new helper function doesn't suffer from this problem.
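To illustrate the approach, here is a minimal sketch (the name ``safe_timeout`` is hypothetical and for illustration only; the real implementation is ``timeout_deferred`` in synapse/util/async_helpers.py, shown in the diff that follows)::

    from twisted.internet import defer
    from twisted.python import failure

    def safe_timeout(d, timeout, reactor):
        # Wrap `d` in a fresh deferred so that a canceller which raises
        # cannot prevent the timeout from firing on the wrapper.
        new_d = defer.Deferred()

        def time_it_out():
            try:
                d.cancel()
            except Exception:
                # A broken canceller must not break the timeout itself.
                pass
            if not new_d.called:
                new_d.errback(defer.TimeoutError(timeout, "Deferred"))

        delayed_call = reactor.callLater(timeout, time_it_out)

        def got_result(res):
            # Stop the pending timeout if the deferred fires first, and
            # pass the result through to the wrapper.
            if delayed_call.active():
                delayed_call.cancel()
            if not new_d.called:
                if isinstance(res, failure.Failure):
                    new_d.errback(res)
                else:
                    new_d.callback(res)

        d.addBoth(got_result)
        return new_d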
--- synapse/http/client.py | 4 +- synapse/http/matrixfederationclient.py | 25 +++++++----- synapse/notifier.py | 13 +++--- synapse/util/async_helpers.py | 73 +++++++++------------------------- 4 files changed, 43 insertions(+), 72 deletions(-) diff --git a/synapse/http/client.py b/synapse/http/client.py index ec339a92ad..3d05f83b8c 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -43,7 +43,7 @@ from twisted.web.http_headers import Headers from synapse.api.errors import Codes, HttpResponseException, SynapseError from synapse.http import cancelled_to_request_timed_out_error, redact_uri from synapse.http.endpoint import SpiderEndpoint -from synapse.util.async_helpers import add_timeout_to_deferred +from synapse.util.async_helpers import timeout_deferred from synapse.util.caches import CACHE_SIZE_FACTOR from synapse.util.logcontext import make_deferred_yieldable @@ -99,7 +99,7 @@ class SimpleHttpClient(object): request_deferred = treq.request( method, uri, agent=self.agent, data=data, headers=headers ) - add_timeout_to_deferred( + request_deferred = timeout_deferred( request_deferred, 60, self.hs.get_reactor(), cancelled_to_request_timed_out_error, ) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 530c0245a9..14b12cd1c4 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -44,7 +44,7 @@ from synapse.api.errors import ( SynapseError, ) from synapse.http.endpoint import matrix_federation_endpoint -from synapse.util.async_helpers import timeout_no_seriously +from synapse.util.async_helpers import timeout_deferred from synapse.util.logcontext import make_deferred_yieldable from synapse.util.metrics import Measure @@ -145,8 +145,14 @@ def _handle_json_response(reactor, timeout_sec, request, response): """ try: check_content_type_is_json(response.headers) + d = treq.json_content(response) - d.addTimeout(timeout_sec, reactor) + d = timeout_deferred( + d, + timeout=timeout_sec, + reactor=reactor, + ) + body = yield make_deferred_yieldable(d) except Exception as e: logger.warn( @@ -321,15 +327,10 @@ class MatrixFederationHttpClient(object): reactor=self.hs.get_reactor(), unbuffered=True ) - request_deferred.addTimeout(_sec_timeout, self.hs.get_reactor()) - # Sometimes the timeout above doesn't work, so lets hack yet - # another layer of timeouts in in the vain hope that at some - # point the world made sense and this really really really - # should work. - request_deferred = timeout_no_seriously( + request_deferred = timeout_deferred( request_deferred, - timeout=_sec_timeout * 2, + timeout=_sec_timeout, reactor=self.hs.get_reactor(), ) @@ -388,7 +389,11 @@ class MatrixFederationHttpClient(object): # :'( # Update transactions table? 
d = treq.content(response) - d.addTimeout(_sec_timeout, self.hs.get_reactor()) + d = timeout_deferred( + d, + timeout=_sec_timeout, + reactor=self.hs.get_reactor(), + ) body = yield make_deferred_yieldable(d) raise HttpResponseException( response.code, response.phrase, body diff --git a/synapse/notifier.py b/synapse/notifier.py index 82f391481c..2d683718fb 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -28,7 +28,7 @@ from synapse.types import StreamToken from synapse.util.async_helpers import ( DeferredTimeoutError, ObservableDeferred, - add_timeout_to_deferred, + timeout_deferred, ) from synapse.util.logcontext import PreserveLoggingContext, run_in_background from synapse.util.logutils import log_function @@ -337,7 +337,7 @@ class Notifier(object): # Now we wait for the _NotifierUserStream to be told there # is a new token. listener = user_stream.new_listener(prev_token) - add_timeout_to_deferred( + listener.deferred = timeout_deferred( listener.deferred, (end_time - now) / 1000., self.hs.get_reactor(), @@ -559,11 +559,12 @@ class Notifier(object): if end_time <= now: break - add_timeout_to_deferred( - listener.deferred.addTimeout, - (end_time - now) / 1000., - self.hs.get_reactor(), + listener.deferred = timeout_deferred( + listener.deferred, + timeout=(end_time - now) / 1000., + reactor=self.hs.get_reactor(), ) + try: with PreserveLoggingContext(): yield listener.deferred diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 40c2946129..2e12bcda98 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -380,23 +380,25 @@ class DeferredTimeoutError(Exception): """ -def add_timeout_to_deferred(deferred, timeout, reactor, on_timeout_cancel=None): - """ - Add a timeout to a deferred by scheduling it to be cancelled after - timeout seconds. +def _cancelled_to_timed_out_error(value, timeout): + if isinstance(value, failure.Failure): + value.trap(CancelledError) + raise DeferredTimeoutError(timeout, "Deferred") + return value - This is essentially a backport of deferred.addTimeout, which was introduced - in twisted 16.5. - If the deferred gets timed out, it errbacks with a DeferredTimeoutError, - unless a cancelable function was passed to its initialization or unless - a different on_timeout_cancel callable is provided. +def timeout_deferred(deferred, timeout, reactor, on_timeout_cancel=None): + """The in built twisted `Deferred.addTimeout` fails to time out deferreds + that have a canceller that throws exceptions. This method creates a new + deferred that wraps and times out the given deferred, correctly handling + the case where the given deferred's canceller throws. - Args: - deferred (defer.Deferred): deferred to be timed out - timeout (Number): seconds to time out after - reactor (twisted.internet.reactor): the Twisted reactor to use + NOTE: Unlike `Deferred.addTimeout`, this function returns a new deferred + Args: + deferred (Deferred) + timeout (float): Timeout in seconds + reactor (twisted.internet.reactor): The twisted reactor to use on_timeout_cancel (callable): A callable which is called immediately after the deferred times out, and not if this deferred is otherwise cancelled before the timeout. @@ -407,47 +409,9 @@ def add_timeout_to_deferred(deferred, timeout, reactor, on_timeout_cancel=None): The default callable (if none is provided) will translate a CancelledError Failure into a DeferredTimeoutError. 
- """ timed_out = [False] def time_it_out(): - timed_out[0] = True - deferred.cancel() - - delayed_call = reactor.callLater(timeout, time_it_out) - - def convert_cancelled(value): - if timed_out[0]: - to_call = on_timeout_cancel or _cancelled_to_timed_out_error - return to_call(value, timeout) - return value - - deferred.addBoth(convert_cancelled) - - def cancel_timeout(result): - # stop the pending call to cancel the deferred if it's been fired - if delayed_call.active(): - delayed_call.cancel() - return result - - deferred.addBoth(cancel_timeout) - - -def _cancelled_to_timed_out_error(value, timeout): - if isinstance(value, failure.Failure): - value.trap(CancelledError) - raise DeferredTimeoutError(timeout, "Deferred") - return value - - -def timeout_no_seriously(deferred, timeout, reactor): - """The in build twisted deferred addTimeout (and the method above) - completely fail to time things out under some unknown circumstances. - - Lets try a different way of timing things out and maybe that will make - things work?! - - TODO: Kill this with fire. + Returns: + Deferred """ new_d = defer.Deferred() @@ -466,7 +430,8 @@ def timeout_no_seriously(deferred, timeout, reactor): def convert_cancelled(value): if timed_out[0]: - return _cancelled_to_timed_out_error(value, timeout) + to_call = on_timeout_cancel or _cancelled_to_timed_out_error + return to_call(value, timeout) return value deferred.addBoth(convert_cancelled) -- cgit 1.4.1 From 6bbe3d5732630ad7f58159d267159bda2b5bfb08 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 19 Sep 2018 10:43:13 +0100 Subject: Newsfile --- changelog.d/3910 | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3910 diff --git a/changelog.d/3910 b/changelog.d/3910 new file mode 100644 index 0000000000..22ec2adc33 --- /dev/null +++ b/changelog.d/3910 @@ -0,0 +1 @@ +Fix bug where things occasionally were not being timed out correctly.
-- cgit 1.4.1 From 6c48aa0256ce7dfd62e72a77b03f5bb2222473ba Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 19 Sep 2018 11:04:42 +0100 Subject: Run canceller first to allow it to generate correct error --- synapse/util/async_helpers.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 2e12bcda98..2b2e85ecb7 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -421,11 +421,14 @@ def timeout_deferred(deferred, timeout, reactor, on_timeout_cancel=None): def time_it_out(): timed_out[0] = True + try: + deferred.cancel() + except: # noqa: E722, if we throw any exception it'll break time outs + logger.exception("Canceller failed during timeout") + if not new_d.called: new_d.errback(DeferredTimeoutError(timeout, "Deferred")) - deferred.cancel() - delayed_call = reactor.callLater(timeout, time_it_out) def convert_cancelled(value): -- cgit 1.4.1 From 9407bcf37a99ebb72eef73e19632cdcf5964c968 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 19 Sep 2018 11:05:21 +0100 Subject: Replace custom DeferredTimeoutError with defer.TimeoutError --- synapse/notifier.py | 5 ++--- synapse/util/async_helpers.py | 12 +++--------- 2 files changed, 5 insertions(+), 12 deletions(-) diff --git a/synapse/notifier.py b/synapse/notifier.py index 2d683718fb..2e7525b7d2 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -26,7 +26,6 @@ from synapse.handlers.presence import format_user_presence_state from synapse.metrics import LaterGauge from synapse.types import StreamToken from synapse.util.async_helpers import ( - DeferredTimeoutError, ObservableDeferred, timeout_deferred, ) @@ -354,7 +353,7 @@ class Notifier(object): # Update the prev_token to the current_token since nothing # has happened between the old prev_token and the current_token prev_token = current_token - except DeferredTimeoutError: + except defer.TimeoutError: break except defer.CancelledError: break @@ -568,7 +567,7 @@ class Notifier(object): try: with PreserveLoggingContext(): yield listener.deferred - except DeferredTimeoutError: + except defer.TimeoutError: break except defer.CancelledError: break diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 2b2e85ecb7..ec7b2c9672 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -374,16 +374,10 @@ class ReadWriteLock(object): defer.returnValue(_ctx_manager()) -class DeferredTimeoutError(Exception): - """ - This error is raised by default when a L{Deferred} times out. - """ - - def _cancelled_to_timed_out_error(value, timeout): if isinstance(value, failure.Failure): value.trap(CancelledError) - raise DeferredTimeoutError(timeout, "Deferred") + raise defer.TimeoutError(timeout, "Deferred") return value @@ -408,7 +402,7 @@ def timeout_deferred(deferred, timeout, reactor, on_timeout_cancel=None): the timeout. The default callable (if none is provided) will translate a - CancelledError Failure into a DeferredTimeoutError. + CancelledError Failure into a defer.TimeoutError. 
 Returns:
     Deferred
@@ -427,7 +421,7 @@ def timeout_deferred(deferred, timeout, reactor, on_timeout_cancel=None):
             logger.exception("Canceller failed during timeout")

         if not new_d.called:
-            new_d.errback(DeferredTimeoutError(timeout, "Deferred"))
+            new_d.errback(defer.TimeoutError(timeout, "Deferred"))

     delayed_call = reactor.callLater(timeout, time_it_out)

-- cgit 1.4.1


From 80d2d50f4764d6662ceef86f834f845b539263e8 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 19 Sep 2018 11:19:47 +0100
Subject: Fixup

---
 changelog.d/3910        | 1 -
 changelog.d/3910.bugfix | 1 +
 synapse/notifier.py     | 5 +----
 3 files changed, 2 insertions(+), 5 deletions(-)
 delete mode 100644 changelog.d/3910
 create mode 100644 changelog.d/3910.bugfix

diff --git a/changelog.d/3910 b/changelog.d/3910
deleted file mode 100644
index 22ec2adc33..0000000000
--- a/changelog.d/3910
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where things occaisonally were not being timed out correctly.
diff --git a/changelog.d/3910.bugfix b/changelog.d/3910.bugfix
new file mode 100644
index 0000000000..22ec2adc33
--- /dev/null
+++ b/changelog.d/3910.bugfix
@@ -0,0 +1 @@
+Fix bug where things occaisonally were not being timed out correctly.
diff --git a/synapse/notifier.py b/synapse/notifier.py
index 2e7525b7d2..f1d92c1395 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -25,10 +25,7 @@ from synapse.api.errors import AuthError
 from synapse.handlers.presence import format_user_presence_state
 from synapse.metrics import LaterGauge
 from synapse.types import StreamToken
-from synapse.util.async_helpers import (
-    ObservableDeferred,
-    timeout_deferred,
-)
+from synapse.util.async_helpers import ObservableDeferred, timeout_deferred
 from synapse.util.logcontext import PreserveLoggingContext, run_in_background
 from synapse.util.logutils import log_function
 from synapse.util.metrics import Measure
-- cgit 1.4.1


From b9158ac2bf46e15107162be136964c17bd8a948c Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 19 Sep 2018 14:16:34 +0100
Subject: Remove get_destination_retry_timings cache

Currently we rely on the master to invalidate this cache promptly.
However, after having moved most federation endpoints off of master
this no longer happens, causing outbound federation to get blackholed.

Fixes #3798
---
 synapse/storage/transactions.py | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py
index 0c42bd3322..7d51f9612a 100644
--- a/synapse/storage/transactions.py
+++ b/synapse/storage/transactions.py
@@ -156,7 +156,6 @@ class TransactionStore(SQLBaseStore):
         """
         pass

-    @cached(max_entries=10000)
     def get_destination_retry_timings(self, destination):
         """Gets the current retry timings (if any) for a given destination.

@@ -212,10 +211,6 @@ class TransactionStore(SQLBaseStore):
                                       retry_last_ts, retry_interval):
         self.database_engine.lock_table(txn, "destinations")

-        self._invalidate_cache_and_stream(
-            txn, self.get_destination_retry_timings, (destination,)
-        )
-
         # We need to be careful here as the data may have changed from under us
         # due to a worker setting the timings.
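The failure mode being removed above is worth spelling out: a memoising
decorator such as @cached keeps its entries in the memory of a single
process, so once the endpoints that write retry timings moved onto workers,
nothing reliably invalidated the copies held by other processes. A toy
decorator (hypothetical, not Synapse's actual @cached implementation) shows
the shape of the problem:

    def per_process_cache(func):
        """Hypothetical stand-in for a memoising decorator: results live
        in *this* process only."""
        cache = {}

        def wrapper(self, destination):
            if destination not in cache:
                cache[destination] = func(self, destination)
            return cache[destination]

        return wrapper

Worker A reads the retry timings for a destination through such a cache;
worker B later resets the timings in the database; worker A's in-memory copy
is never invalidated, so it keeps backing off and outbound federation to that
destination appears blackholed. Dropping the cache trades a little extra
database load for cross-process correctness.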
-- cgit 1.4.1 From 83ee5592a5fae9335a0c4b62e3f2dea870296ac5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 19 Sep 2018 14:20:16 +0100 Subject: Newsfile --- changelog.d/3914.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3914.bugfix diff --git a/changelog.d/3914.bugfix b/changelog.d/3914.bugfix new file mode 100644 index 0000000000..60fb8ec1ae --- /dev/null +++ b/changelog.d/3914.bugfix @@ -0,0 +1 @@ +Fix bug where outboud federation would stop talking to some servers when using workers -- cgit 1.4.1 From 392a54128c03128255ee538e17ed9bd87b95d4ca Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 19 Sep 2018 14:37:49 +0100 Subject: pep8 --- synapse/storage/transactions.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index 7d51f9612a..449bdc1e49 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -23,7 +23,6 @@ from canonicaljson import encode_canonical_json from twisted.internet import defer from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.util.caches.descriptors import cached from ._base import SQLBaseStore, db_to_json -- cgit 1.4.1 From bbab6ebfd9257ba9ad5e576c4b9b8bdea1c166cf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 19 Sep 2018 14:45:14 +0100 Subject: Fix up changelog and remove spurious comment --- changelog.d/3914.bugfix | 2 +- synapse/storage/transactions.py | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/changelog.d/3914.bugfix b/changelog.d/3914.bugfix index 60fb8ec1ae..27e6bad590 100644 --- a/changelog.d/3914.bugfix +++ b/changelog.d/3914.bugfix @@ -1 +1 @@ -Fix bug where outboud federation would stop talking to some servers when using workers +Fix bug where outbound federation would stop talking to some servers when using workers diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index 449bdc1e49..baf0379a68 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -196,8 +196,6 @@ class TransactionStore(SQLBaseStore): retry_interval (int) - how long until next retry in ms """ - # XXX: we could chose to not bother persisting this if our cache thinks - # this is a NOOP return self.runInteraction( "set_destination_retry_timings", self._set_destination_retry_timings, -- cgit 1.4.1 From 1a24d4effaeadaa14e30d56c1a710fff296c2391 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 Sep 2018 16:22:26 +0100 Subject: Newsfile --- changelog.d/3794.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3794.misc diff --git a/changelog.d/3794.misc b/changelog.d/3794.misc new file mode 100644 index 0000000000..6b98c9609b --- /dev/null +++ b/changelog.d/3794.misc @@ -0,0 +1 @@ +Speed up calculation of typing updates for replication -- cgit 1.4.1 From 642199570c57fd37b856c3808e08fe2923001b55 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 19 Sep 2018 17:28:18 +0100 Subject: Improve the logging when handling a federation transaction (#3904) Let's try to rationalise the logging that happens when we are processing an incoming transaction, to make it easier to figure out what is going wrong when they take ages. In particular: - make everything start with a [room_id event_id] prefix - make sure we log a warning when catching exceptions rather than just turning them into other, more cryptic, exceptions. 
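Condensed, the second point is the following pattern: log the underlying
error, with its traceback and the [room_id event_id] prefix, before replacing
it with a more generic exception. A self-contained sketch (the FederationError
stand-in and the resolver argument are hypothetical; the real class lives in
synapse.api.errors):

    import logging

    logger = logging.getLogger(__name__)


    class FederationError(Exception):
        """Hypothetical local stand-in for synapse.api.errors.FederationError."""


    def resolve_state_with_logging(room_id, event_id, resolver):
        """Call resolver(); on failure, log the real traceback under the
        searchable [room_id event_id] prefix before raising a generic error."""
        try:
            return resolver()
        except Exception:
            # keep the original traceback in the logs rather than losing it
            # inside the more cryptic replacement exception
            logger.warn(
                "[%s %s] Error attempting to resolve state at missing "
                "prev_events",
                room_id, event_id, exc_info=True,
            )
            raise FederationError(
                "We can't get valid state history (affected: %s)" % (event_id,)
            )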
--- changelog.d/3904.misc | 1 + synapse/handlers/federation.py | 164 +++++++++++++++++++++++++++-------------- synapse/util/retryutils.py | 2 +- 3 files changed, 111 insertions(+), 56 deletions(-) create mode 100644 changelog.d/3904.misc diff --git a/changelog.d/3904.misc b/changelog.d/3904.misc new file mode 100644 index 0000000000..1e3c8e1706 --- /dev/null +++ b/changelog.d/3904.misc @@ -0,0 +1 @@ +Improve the logging when handling a federation transaction \ No newline at end of file diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index f10b46414b..8d6bd7976d 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -69,6 +69,27 @@ from ._base import BaseHandler logger = logging.getLogger(__name__) +def shortstr(iterable, maxitems=5): + """If iterable has maxitems or fewer, return the stringification of a list + containing those items. + + Otherwise, return the stringification of a a list with the first maxitems items, + followed by "...". + + Args: + iterable (Iterable): iterable to truncate + maxitems (int): number of items to return before truncating + + Returns: + unicode + """ + + items = list(itertools.islice(iterable, maxitems + 1)) + if len(items) <= maxitems: + return str(items) + return u"[" + u", ".join(repr(r) for r in items[:maxitems]) + u", ...]" + + class FederationHandler(BaseHandler): """Handles events that originated from federation. Responsible for: @@ -114,7 +135,6 @@ class FederationHandler(BaseHandler): self._room_pdu_linearizer = Linearizer("fed_room_pdu") @defer.inlineCallbacks - @log_function def on_receive_pdu( self, origin, pdu, get_missing=True, sent_to_us_directly=False, ): @@ -130,9 +150,17 @@ class FederationHandler(BaseHandler): Returns (Deferred): completes with None """ + room_id = pdu.room_id + event_id = pdu.event_id + + logger.info( + "[%s %s] handling received PDU: %s", + room_id, event_id, pdu, + ) + # We reprocess pdus when we have seen them only as outliers existing = yield self.store.get_event( - pdu.event_id, + event_id, allow_none=True, allow_rejected=True, ) @@ -147,7 +175,7 @@ class FederationHandler(BaseHandler): ) ) if already_seen: - logger.debug("Already seen pdu %s", pdu.event_id) + logger.debug("[%s %s]: Already seen pdu", room_id, event_id) return # do some initial sanity-checking of the event. In particular, make @@ -156,6 +184,7 @@ class FederationHandler(BaseHandler): try: self._sanity_check_event(pdu) except SynapseError as err: + logger.warn("[%s %s] Received event failed sanity checks", room_id, event_id) raise FederationError( "ERROR", err.code, @@ -165,10 +194,12 @@ class FederationHandler(BaseHandler): # If we are currently in the process of joining this room, then we # queue up events for later processing. - if pdu.room_id in self.room_queues: - logger.info("Ignoring PDU %s for room %s from %s for now; join " - "in progress", pdu.event_id, pdu.room_id, origin) - self.room_queues[pdu.room_id].append((pdu, origin)) + if room_id in self.room_queues: + logger.info( + "[%s %s] Queuing PDU from %s for now: join in progress", + room_id, event_id, origin, + ) + self.room_queues[room_id].append((pdu, origin)) return # If we're no longer in the room just ditch the event entirely. This @@ -179,7 +210,7 @@ class FederationHandler(BaseHandler): # we should check if we *are* in fact in the room. If we are then we # can magically rejoin the room. 
is_in_room = yield self.auth.check_host_in_room( - pdu.room_id, + room_id, self.server_name ) if not is_in_room: @@ -188,8 +219,8 @@ class FederationHandler(BaseHandler): ) if was_in_room: logger.info( - "Ignoring PDU %s for room %s from %s as we've left the room!", - pdu.event_id, pdu.room_id, origin, + "[%s %s] Ignoring PDU from %s as we've left the room", + room_id, event_id, origin, ) defer.returnValue(None) @@ -204,8 +235,8 @@ class FederationHandler(BaseHandler): ) logger.debug( - "_handle_new_pdu min_depth for %s: %d", - pdu.room_id, min_depth + "[%s %s] min_depth: %d", + room_id, event_id, min_depth, ) prevs = {e_id for e_id, _ in pdu.prev_events} @@ -218,17 +249,18 @@ class FederationHandler(BaseHandler): # send to the clients. pdu.internal_metadata.outlier = True elif min_depth and pdu.depth > min_depth: - if get_missing and prevs - seen: + missing_prevs = prevs - seen + if get_missing and missing_prevs: # If we're missing stuff, ensure we only fetch stuff one # at a time. logger.info( - "Acquiring lock for room %r to fetch %d missing events: %r...", - pdu.room_id, len(prevs - seen), list(prevs - seen)[:5], + "[%s %s] Acquiring room lock to fetch %d missing prev_events: %s", + room_id, event_id, len(missing_prevs), shortstr(missing_prevs), ) with (yield self._room_pdu_linearizer.queue(pdu.room_id)): logger.info( - "Acquired lock for room %r to fetch %d missing events", - pdu.room_id, len(prevs - seen), + "[%s %s] Acquired room lock to fetch %d missing prev_events", + room_id, event_id, len(missing_prevs), ) yield self._get_missing_events_for_pdu( @@ -241,19 +273,23 @@ class FederationHandler(BaseHandler): if not prevs - seen: logger.info( - "Found all missing prev events for %s", pdu.event_id + "[%s %s] Found all missing prev_events", + room_id, event_id, ) - elif prevs - seen: + elif missing_prevs: logger.info( - "Not fetching %d missing events for room %r,event %s: %r...", - len(prevs - seen), pdu.room_id, pdu.event_id, - list(prevs - seen)[:5], + "[%s %s] Not recursively fetching %d missing prev_events: %s", + room_id, event_id, len(missing_prevs), shortstr(missing_prevs), ) if sent_to_us_directly and prevs - seen: # If they have sent it to us directly, and the server # isn't telling us about the auth events that it's # made a message referencing, we explode + logger.warn( + "[%s %s] Failed to fetch %d prev events: rejecting", + room_id, event_id, len(prevs - seen), + ) raise FederationError( "ERROR", 403, @@ -270,15 +306,19 @@ class FederationHandler(BaseHandler): auth_chains = set() try: # Get the state of the events we know about - ours = yield self.store.get_state_groups(pdu.room_id, list(seen)) + ours = yield self.store.get_state_groups(room_id, list(seen)) state_groups.append(ours) # Ask the remote server for the states we don't # know about for p in prevs - seen: + logger.info( + "[%s %s] Requesting state at missing prev_event %s", + room_id, event_id, p, + ) state, got_auth_chain = ( yield self.federation_client.get_state_for_room( - origin, pdu.room_id, p + origin, room_id, p, ) ) auth_chains.update(got_auth_chain) @@ -291,19 +331,24 @@ class FederationHandler(BaseHandler): ev_ids, get_prev_content=False, check_redacted=False ) - room_version = yield self.store.get_room_version(pdu.room_id) + room_version = yield self.store.get_room_version(room_id) state_map = yield resolve_events_with_factory( - room_version, state_groups, {pdu.event_id: pdu}, fetch + room_version, state_groups, {event_id: pdu}, fetch ) state = (yield 
self.store.get_events(state_map.values())).values() auth_chain = list(auth_chains) except Exception: + logger.warn( + "[%s %s] Error attempting to resolve state at missing " + "prev_events", + room_id, event_id, exc_info=True, + ) raise FederationError( "ERROR", 403, "We can't get valid state history.", - affected=pdu.event_id, + affected=event_id, ) yield self._process_received_pdu( @@ -322,15 +367,16 @@ class FederationHandler(BaseHandler): prevs (set(str)): List of event ids which we are missing min_depth (int): Minimum depth of events to return. """ - # We recalculate seen, since it may have changed. + + room_id = pdu.room_id + event_id = pdu.event_id + seen = yield self.store.have_seen_events(prevs) if not prevs - seen: return - latest = yield self.store.get_latest_event_ids_in_room( - pdu.room_id - ) + latest = yield self.store.get_latest_event_ids_in_room(room_id) # We add the prev events that we have seen to the latest # list to ensure the remote server doesn't give them to us @@ -338,8 +384,8 @@ class FederationHandler(BaseHandler): latest |= seen logger.info( - "Missing %d events for room %r pdu %s: %r...", - len(prevs - seen), pdu.room_id, pdu.event_id, list(prevs - seen)[:5] + "[%s %s]: Requesting %d prev_events: %s", + room_id, event_id, len(prevs - seen), shortstr(prevs - seen) ) # XXX: we set timeout to 10s to help workaround @@ -392,7 +438,7 @@ class FederationHandler(BaseHandler): missing_events = yield self.federation_client.get_missing_events( origin, - pdu.room_id, + room_id, earliest_events_ids=list(latest), latest_events=[pdu], limit=10, @@ -401,37 +447,46 @@ class FederationHandler(BaseHandler): ) logger.info( - "Got %d events: %r...", - len(missing_events), [e.event_id for e in missing_events[:5]] + "[%s %s]: Got %d prev_events: %s", + room_id, event_id, len(missing_events), shortstr(missing_events), ) # We want to sort these by depth so we process them and # tell clients about them in order. missing_events.sort(key=lambda x: x.depth) - for e in missing_events: - logger.info("Handling found event %s", e.event_id) + for ev in missing_events: + logger.info( + "[%s %s] Handling received prev_event %s", + room_id, event_id, ev.event_id, + ) try: yield self.on_receive_pdu( origin, - e, + ev, get_missing=False ) except FederationError as e: if e.code == 403: - logger.warn("Event %s failed history check.") + logger.warn( + "[%s %s] Received prev_event %s failed history check.", + room_id, event_id, ev.event_id, + ) else: raise - @log_function @defer.inlineCallbacks - def _process_received_pdu(self, origin, pdu, state, auth_chain): + def _process_received_pdu(self, origin, event, state, auth_chain): """ Called when we have a new pdu. We need to do auth checks and put it through the StateHandler. """ - event = pdu + room_id = event.room_id + event_id = event.event_id - logger.debug("Processing event: %s", event) + logger.debug( + "[%s %s] Processing event: %s", + room_id, event_id, event, + ) # FIXME (erikj): Awful hack to make the case where we are not currently # in the room work @@ -440,15 +495,16 @@ class FederationHandler(BaseHandler): # event. 
if state and auth_chain and not event.internal_metadata.is_outlier(): is_in_room = yield self.auth.check_host_in_room( - event.room_id, + room_id, self.server_name ) else: is_in_room = True + if not is_in_room: logger.info( - "Got event for room we're not in: %r %r", - event.room_id, event.event_id + "[%s %s] Got event for room we're not in", + room_id, event_id, ) try: @@ -460,7 +516,7 @@ class FederationHandler(BaseHandler): "ERROR", e.code, e.msg, - affected=event.event_id, + affected=event_id, ) else: @@ -509,12 +565,12 @@ class FederationHandler(BaseHandler): affected=event.event_id, ) - room = yield self.store.get_room(event.room_id) + room = yield self.store.get_room(room_id) if not room: try: yield self.store.store_room( - room_id=event.room_id, + room_id=room_id, room_creator_user_id="", is_public=False, ) @@ -542,7 +598,7 @@ class FederationHandler(BaseHandler): if newly_joined: user = UserID.from_string(event.state_key) - yield self.user_joined_room(user, event.room_id) + yield self.user_joined_room(user, room_id) @log_function @defer.inlineCallbacks @@ -1459,12 +1515,10 @@ class FederationHandler(BaseHandler): else: defer.returnValue(None) - @log_function def get_min_depth_for_context(self, context): return self.store.get_min_depth(context) @defer.inlineCallbacks - @log_function def _handle_new_event(self, origin, event, state=None, auth_events=None, backfilled=False): context = yield self._prep_event( @@ -1664,8 +1718,8 @@ class FederationHandler(BaseHandler): ) except AuthError as e: logger.warn( - "Rejecting %s because %s", - event.event_id, e.msg + "[%s %s] Rejecting: %s", + event.room_id, event.event_id, e.msg ) context.rejected = RejectedReason.AUTH_ERROR diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 8a3a06fd74..26cce7d197 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -188,7 +188,7 @@ class RetryDestinationLimiter(object): else: self.retry_interval = self.min_retry_interval - logger.debug( + logger.info( "Connection to %s was unsuccessful (%s(%s)); backoff now %i", self.destination, exc_type, exc_val, self.retry_interval ) -- cgit 1.4.1 From aeca5a5ed5816a7797a0807142a41e14baac80f6 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 20 Sep 2018 16:28:18 +1000 Subject: Add a regression test for logging on failed connections (#3912) --- changelog.d/3912.misc | 1 + synapse/http/site.py | 4 +-- tests/test_server.py | 74 +++++++++++++++++++++++++++++++++++++++++++++++++-- tests/unittest.py | 3 ++- 4 files changed, 77 insertions(+), 5 deletions(-) create mode 100644 changelog.d/3912.misc diff --git a/changelog.d/3912.misc b/changelog.d/3912.misc new file mode 100644 index 0000000000..87d73697ea --- /dev/null +++ b/changelog.d/3912.misc @@ -0,0 +1 @@ +Add a regression test for logging failed HTTP requests on Python 3. 
\ No newline at end of file diff --git a/synapse/http/site.py b/synapse/http/site.py index 9579e8cd0d..d41d672934 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -75,9 +75,9 @@ class SynapseRequest(Request): return '<%s at 0x%x method=%r uri=%r clientproto=%r site=%r>' % ( self.__class__.__name__, id(self), - self.method, + self.method.decode('ascii', errors='replace'), self.get_redacted_uri(), - self.clientproto, + self.clientproto.decode('ascii', errors='replace'), self.site.site_tag, ) diff --git a/tests/test_server.py b/tests/test_server.py index ef74544e93..4045fdadc3 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -1,14 +1,35 @@ +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging import re +from six import StringIO + from twisted.internet.defer import Deferred -from twisted.test.proto_helpers import MemoryReactorClock +from twisted.python.failure import Failure +from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock +from twisted.web.resource import Resource +from twisted.web.server import NOT_DONE_YET from synapse.api.errors import Codes, SynapseError from synapse.http.server import JsonResource +from synapse.http.site import SynapseSite, logger from synapse.util import Clock from tests import unittest -from tests.server import make_request, render, setup_test_homeserver +from tests.server import FakeTransport, make_request, render, setup_test_homeserver class JsonResourceTests(unittest.TestCase): @@ -121,3 +142,52 @@ class JsonResourceTests(unittest.TestCase): self.assertEqual(channel.result["code"], b'400') self.assertEqual(channel.json_body["error"], "Unrecognized request") self.assertEqual(channel.json_body["errcode"], "M_UNRECOGNIZED") + + +class SiteTestCase(unittest.HomeserverTestCase): + def test_lose_connection(self): + """ + We log the URI correctly redacted when we lose the connection. + """ + + class HangingResource(Resource): + """ + A Resource that strategically hangs, as if it were processing an + answer. 
+ """ + + def render(self, request): + return NOT_DONE_YET + + # Set up a logging handler that we can inspect afterwards + output = StringIO() + handler = logging.StreamHandler(output) + logger.addHandler(handler) + old_level = logger.level + logger.setLevel(10) + self.addCleanup(logger.setLevel, old_level) + self.addCleanup(logger.removeHandler, handler) + + # Make a resource and a Site, the resource will hang and allow us to + # time out the request while it's 'processing' + base_resource = Resource() + base_resource.putChild(b'', HangingResource()) + site = SynapseSite("test", "site_tag", {}, base_resource, "1.0") + + server = site.buildProtocol(None) + client = AccumulatingProtocol() + client.makeConnection(FakeTransport(server, self.reactor)) + server.makeConnection(FakeTransport(client, self.reactor)) + + # Send a request with an access token that will get redacted + server.dataReceived(b"GET /?access_token=bar HTTP/1.0\r\n\r\n") + self.pump() + + # Lose the connection + e = Failure(Exception("Failed123")) + server.connectionLost(e) + handler.flush() + + # Our access token is redacted and the failure reason is logged. + self.assertIn("/?access_token=", output.getvalue()) + self.assertIn("Failed123", output.getvalue()) diff --git a/tests/unittest.py b/tests/unittest.py index a3d39920db..56f3dca394 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -219,7 +219,8 @@ class HomeserverTestCase(TestCase): Function to be overridden in subclasses. """ - raise NotImplementedError() + hs = self.setup_test_homeserver() + return hs def prepare(self, reactor, clock, homeserver): """ -- cgit 1.4.1 From 741571cf22b342f2cfaf5c47f324397af7072bb3 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 20 Sep 2018 18:12:45 +1000 Subject: Add a way to run tests in PostgreSQL in Docker (#3699) --- .gitignore | 1 + CONTRIBUTING.rst | 38 ++++++++++++++++++++++++++++++-------- MANIFEST.in | 1 + changelog.d/3699.misc | 2 ++ docker/Dockerfile-pgtests | 12 ++++++++++++ docker/run_pg_tests.sh | 20 ++++++++++++++++++++ test_postgresql.sh | 12 ++++++++++++ tox.ini | 2 +- 8 files changed, 79 insertions(+), 9 deletions(-) create mode 100644 changelog.d/3699.misc create mode 100644 docker/Dockerfile-pgtests create mode 100755 docker/run_pg_tests.sh create mode 100755 test_postgresql.sh diff --git a/.gitignore b/.gitignore index a725725235..3b2252ad8a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ *.pyc .*.swp *~ +*.lock .DS_Store _trial_temp/ diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index f9de78a460..6ef7d48dc7 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -30,12 +30,28 @@ use github's pull request workflow to review the contribution, and either ask you to make any refinements needed or merge it and make them ourselves. The changes will then land on master when we next do a release. -We use `Jenkins `_ and -`Travis `_ for continuous -integration. All pull requests to synapse get automatically tested by Travis; -the Jenkins builds require an adminstrator to start them. If your change -breaks the build, this will be shown in github, so please keep an eye on the -pull request for feedback. +We use `CircleCI `_ and `Travis CI +`_ for continuous integration. All +pull requests to synapse get automatically tested by Travis and CircleCI. +If your change breaks the build, this will be shown in GitHub, so please +keep an eye on the pull request for feedback. 
+ +To run unit tests in a local development environment, you can use: + +- ``tox -e py27`` (requires tox to be installed by ``pip install tox``) for + SQLite-backed Synapse on Python 2.7. +- ``tox -e py35`` for SQLite-backed Synapse on Python 3.5. +- ``tox -e py36`` for SQLite-backed Synapse on Python 3.6. +- ``tox -e py27-postgres`` for PostgreSQL-backed Synapse on Python 2.7 + (requires a running local PostgreSQL with access to create databases). +- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 2.7 + (requires Docker). Entirely self-contained, recommended if you don't want to + set up PostgreSQL yourself. + +Docker images are available for running the integration tests (SyTest) locally, +see the `documentation in the SyTest repo +`_ for more +information. Code style ~~~~~~~~~~ @@ -77,7 +93,8 @@ AUTHORS.rst file for the project in question. Please feel free to include a change to AUTHORS.rst in your pull request to list yourself and a short description of the area(s) you've worked on. Also, we sometimes have swag to give away to contributors - if you feel that Matrix-branded apparel is missing -from your life, please mail us your shipping address to matrix at matrix.org and we'll try to fix it :) +from your life, please mail us your shipping address to matrix at matrix.org and +we'll try to fix it :) Sign off ~~~~~~~~ @@ -144,4 +161,9 @@ flag to ``git commit``, which uses the name and email set in your Conclusion ~~~~~~~~~~ -That's it! Matrix is a very open and collaborative project as you might expect given our obsession with open communication. If we're going to successfully matrix together all the fragmented communication technologies out there we are reliant on contributions and collaboration from the community to do so. So please get involved - and we hope you have as much fun hacking on Matrix as we do! +That's it! Matrix is a very open and collaborative project as you might expect +given our obsession with open communication. If we're going to successfully +matrix together all the fragmented communication technologies out there we are +reliant on contributions and collaboration from the community to do so. So +please get involved - and we hope you have as much fun hacking on Matrix as we +do! diff --git a/MANIFEST.in b/MANIFEST.in index e0826ba544..47ae5a77b9 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -28,6 +28,7 @@ exclude jenkins*.sh exclude jenkins* exclude Dockerfile exclude .dockerignore +exclude test_postgresql.sh recursive-exclude jenkins *.sh include pyproject.toml diff --git a/changelog.d/3699.misc b/changelog.d/3699.misc new file mode 100644 index 0000000000..437efbd98f --- /dev/null +++ b/changelog.d/3699.misc @@ -0,0 +1,2 @@ +Unit tests can now be run under PostgreSQL in Docker using +``test_postgresql.sh``. 
diff --git a/docker/Dockerfile-pgtests b/docker/Dockerfile-pgtests new file mode 100644 index 0000000000..7da8eeb9eb --- /dev/null +++ b/docker/Dockerfile-pgtests @@ -0,0 +1,12 @@ +# Use the Sytest image that comes with a lot of the build dependencies +# pre-installed +FROM matrixdotorg/sytest:latest + +# The Sytest image doesn't come with python, so install that +RUN apt-get -qq install -y python python-dev python-pip + +# We need tox to run the tests in run_pg_tests.sh +RUN pip install tox + +ADD run_pg_tests.sh /pg_tests.sh +ENTRYPOINT /pg_tests.sh diff --git a/docker/run_pg_tests.sh b/docker/run_pg_tests.sh new file mode 100755 index 0000000000..e77424c41a --- /dev/null +++ b/docker/run_pg_tests.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +# This script runs the PostgreSQL tests inside a Docker container. It expects +# the relevant source files to be mounted into /src (done automatically by the +# caller script). It will set up the database, run it, and then use the tox +# configuration to run the tests. + +set -e + +# Set PGUSER so Synapse's tests know what user to connect to the database with +export PGUSER=postgres + +# Initialise & start the database +su -c '/usr/lib/postgresql/9.6/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="en_US.UTF-8" --lc-ctype="en_US.UTF-8" --username=postgres' postgres +su -c '/usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres + +# Run the tests +cd /src +export TRIAL_FLAGS="-j 4" +tox --workdir=/tmp -e py27-postgres diff --git a/test_postgresql.sh b/test_postgresql.sh new file mode 100755 index 0000000000..1ffcaabd31 --- /dev/null +++ b/test_postgresql.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +# This script builds the Docker image to run the PostgreSQL tests, and then runs +# the tests. + +set -e + +# Build, and tag +docker build docker/ -f docker/Dockerfile-pgtests -t synapsepgtests + +# Run, mounting the current directory into /src +docker run --rm -it -v $(pwd)\:/src synapsepgtests diff --git a/tox.ini b/tox.ini index 80ac9324df..88875c3318 100644 --- a/tox.ini +++ b/tox.ini @@ -4,7 +4,7 @@ envlist = packaging, py27, py36, pep8, check_isort [base] deps = coverage - Twisted>=15.1 + Twisted>=17.1 mock python-subunit junitxml -- cgit 1.4.1 From 1f3f5fcf52efdf5215dc6c7cd3805b1357bf8236 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 20 Sep 2018 20:14:34 +1000 Subject: Fix client IPs being broken on Python 3 (#3908) --- .travis.yml | 5 + changelog.d/3908.bugfix | 1 + synapse/http/site.py | 2 +- synapse/storage/client_ips.py | 34 ++++--- tests/server.py | 8 +- tests/storage/test_client_ips.py | 202 ++++++++++++++++++++++++++++++++------- tests/unittest.py | 7 +- tests/utils.py | 27 +++++- tox.ini | 10 ++ 9 files changed, 238 insertions(+), 58 deletions(-) create mode 100644 changelog.d/3908.bugfix diff --git a/.travis.yml b/.travis.yml index b3ee66da8f..b6faca4b92 100644 --- a/.travis.yml +++ b/.travis.yml @@ -31,6 +31,11 @@ matrix: - python: 3.6 env: TOX_ENV=py36 + - python: 3.6 + env: TOX_ENV=py36-postgres TRIAL_FLAGS="-j 4" + services: + - postgresql + - python: 3.6 env: TOX_ENV=check_isort diff --git a/changelog.d/3908.bugfix b/changelog.d/3908.bugfix new file mode 100644 index 0000000000..518aee6c4d --- /dev/null +++ b/changelog.d/3908.bugfix @@ -0,0 +1 @@ +Fix adding client IPs to the database failing on Python 3. 
\ No newline at end of file diff --git a/synapse/http/site.py b/synapse/http/site.py index d41d672934..50be2de3bb 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -308,7 +308,7 @@ class XForwardedForRequest(SynapseRequest): C{b"-"}. """ return self.requestHeaders.getRawHeaders( - b"x-forwarded-for", [b"-"])[0].split(b",")[0].strip() + b"x-forwarded-for", [b"-"])[0].split(b",")[0].strip().decode('ascii') class SynapseRequestFactory(object): diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py index 8fc678fa67..9ad17b7c25 100644 --- a/synapse/storage/client_ips.py +++ b/synapse/storage/client_ips.py @@ -119,21 +119,25 @@ class ClientIpStore(background_updates.BackgroundUpdateStore): for entry in iteritems(to_update): (user_id, access_token, ip), (user_agent, device_id, last_seen) = entry - self._simple_upsert_txn( - txn, - table="user_ips", - keyvalues={ - "user_id": user_id, - "access_token": access_token, - "ip": ip, - "user_agent": user_agent, - "device_id": device_id, - }, - values={ - "last_seen": last_seen, - }, - lock=False, - ) + try: + self._simple_upsert_txn( + txn, + table="user_ips", + keyvalues={ + "user_id": user_id, + "access_token": access_token, + "ip": ip, + "user_agent": user_agent, + "device_id": device_id, + }, + values={ + "last_seen": last_seen, + }, + lock=False, + ) + except Exception as e: + # Failed to upsert, log and continue + logger.error("Failed to insert client IP %r: %r", entry, e) @defer.inlineCallbacks def get_last_client_ip_by_device(self, user_id, device_id): diff --git a/tests/server.py b/tests/server.py index ccea3baa55..7bee58dff1 100644 --- a/tests/server.py +++ b/tests/server.py @@ -98,7 +98,7 @@ class FakeSite: return FakeLogger() -def make_request(method, path, content=b"", access_token=None): +def make_request(method, path, content=b"", access_token=None, request=SynapseRequest): """ Make a web request using the given method and path, feed it the content, and return the Request and the Channel underneath. @@ -120,14 +120,16 @@ def make_request(method, path, content=b"", access_token=None): site = FakeSite() channel = FakeChannel() - req = SynapseRequest(site, channel) + req = request(site, channel) req.process = lambda: b"" req.content = BytesIO(content) if access_token: req.requestHeaders.addRawHeader(b"Authorization", b"Bearer " + access_token) - req.requestHeaders.addRawHeader(b"X-Forwarded-For", b"127.0.0.1") + if content: + req.requestHeaders.addRawHeader(b"Content-Type", b"application/json") + req.requestReceived(method, path, b"1.1") return req, channel diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index c9b02a062b..2ffbb9f14f 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,35 +13,45 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ +import hashlib +import hmac +import json + from mock import Mock from twisted.internet import defer -import tests.unittest -import tests.utils +from synapse.http.site import XForwardedForRequest +from synapse.rest.client.v1 import admin, login + +from tests import unittest -class ClientIpStoreTestCase(tests.unittest.TestCase): - def __init__(self, *args, **kwargs): - super(ClientIpStoreTestCase, self).__init__(*args, **kwargs) - self.store = None # type: synapse.storage.DataStore - self.clock = None # type: tests.utils.MockClock +class ClientIpStoreTestCase(unittest.HomeserverTestCase): + def make_homeserver(self, reactor, clock): + hs = self.setup_test_homeserver() + return hs - @defer.inlineCallbacks - def setUp(self): - self.hs = yield tests.utils.setup_test_homeserver(self.addCleanup) + def prepare(self, hs, reactor, clock): self.store = self.hs.get_datastore() - self.clock = self.hs.get_clock() - @defer.inlineCallbacks def test_insert_new_client_ip(self): - self.clock.now = 12345678 + self.reactor.advance(12345678) + user_id = "@user:id" - yield self.store.insert_client_ip( - user_id, "access_token", "ip", "user_agent", "device_id" + self.get_success( + self.store.insert_client_ip( + user_id, "access_token", "ip", "user_agent", "device_id" + ) ) - result = yield self.store.get_last_client_ip_by_device(user_id, "device_id") + # Trigger the storage loop + self.reactor.advance(10) + + result = self.get_success( + self.store.get_last_client_ip_by_device(user_id, "device_id") + ) r = result[(user_id, "device_id")] self.assertDictContainsSubset( @@ -55,18 +66,18 @@ class ClientIpStoreTestCase(tests.unittest.TestCase): r, ) - @defer.inlineCallbacks def test_disabled_monthly_active_user(self): self.hs.config.limit_usage_by_mau = False self.hs.config.max_mau_value = 50 user_id = "@user:server" - yield self.store.insert_client_ip( - user_id, "access_token", "ip", "user_agent", "device_id" + self.get_success( + self.store.insert_client_ip( + user_id, "access_token", "ip", "user_agent", "device_id" + ) ) - active = yield self.store.user_last_seen_monthly_active(user_id) + active = self.get_success(self.store.user_last_seen_monthly_active(user_id)) self.assertFalse(active) - @defer.inlineCallbacks def test_adding_monthly_active_user_when_full(self): self.hs.config.limit_usage_by_mau = True self.hs.config.max_mau_value = 50 @@ -76,38 +87,159 @@ class ClientIpStoreTestCase(tests.unittest.TestCase): self.store.get_monthly_active_count = Mock( return_value=defer.succeed(lots_of_users) ) - yield self.store.insert_client_ip( - user_id, "access_token", "ip", "user_agent", "device_id" + self.get_success( + self.store.insert_client_ip( + user_id, "access_token", "ip", "user_agent", "device_id" + ) ) - active = yield self.store.user_last_seen_monthly_active(user_id) + active = self.get_success(self.store.user_last_seen_monthly_active(user_id)) self.assertFalse(active) - @defer.inlineCallbacks def test_adding_monthly_active_user_when_space(self): self.hs.config.limit_usage_by_mau = True self.hs.config.max_mau_value = 50 user_id = "@user:server" - active = yield self.store.user_last_seen_monthly_active(user_id) + active = self.get_success(self.store.user_last_seen_monthly_active(user_id)) self.assertFalse(active) - yield self.store.insert_client_ip( - user_id, "access_token", "ip", "user_agent", "device_id" + # Trigger the saving loop + self.reactor.advance(10) + + self.get_success( + self.store.insert_client_ip( + user_id, "access_token", "ip", "user_agent", "device_id" + ) ) - active = yield 
self.store.user_last_seen_monthly_active(user_id) + active = self.get_success(self.store.user_last_seen_monthly_active(user_id)) self.assertTrue(active) - @defer.inlineCallbacks def test_updating_monthly_active_user_when_space(self): self.hs.config.limit_usage_by_mau = True self.hs.config.max_mau_value = 50 user_id = "@user:server" - yield self.store.register(user_id=user_id, token="123", password_hash=None) + self.get_success( + self.store.register(user_id=user_id, token="123", password_hash=None) + ) - active = yield self.store.user_last_seen_monthly_active(user_id) + active = self.get_success(self.store.user_last_seen_monthly_active(user_id)) self.assertFalse(active) - yield self.store.insert_client_ip( - user_id, "access_token", "ip", "user_agent", "device_id" + # Trigger the saving loop + self.reactor.advance(10) + + self.get_success( + self.store.insert_client_ip( + user_id, "access_token", "ip", "user_agent", "device_id" + ) ) - active = yield self.store.user_last_seen_monthly_active(user_id) + active = self.get_success(self.store.user_last_seen_monthly_active(user_id)) self.assertTrue(active) + + +class ClientIpAuthTestCase(unittest.HomeserverTestCase): + + servlets = [admin.register_servlets, login.register_servlets] + + def make_homeserver(self, reactor, clock): + hs = self.setup_test_homeserver() + return hs + + def prepare(self, hs, reactor, clock): + self.hs.config.registration_shared_secret = u"shared" + self.store = self.hs.get_datastore() + + # Create the user + request, channel = self.make_request("GET", "/_matrix/client/r0/admin/register") + self.render(request) + nonce = channel.json_body["nonce"] + + want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) + want_mac.update(nonce.encode('ascii') + b"\x00bob\x00abc123\x00admin") + want_mac = want_mac.hexdigest() + + body = json.dumps( + { + "nonce": nonce, + "username": "bob", + "password": "abc123", + "admin": True, + "mac": want_mac, + } + ) + request, channel = self.make_request( + "POST", "/_matrix/client/r0/admin/register", body.encode('utf8') + ) + self.render(request) + + self.assertEqual(channel.code, 200) + self.user_id = channel.json_body["user_id"] + + def test_request_with_xforwarded(self): + """ + The IP in X-Forwarded-For is entered into the client IPs table. + """ + self._runtest( + {b"X-Forwarded-For": b"127.9.0.1"}, + "127.9.0.1", + {"request": XForwardedForRequest}, + ) + + def test_request_from_getPeer(self): + """ + The IP returned by getPeer is entered into the client IPs table, if + there's no X-Forwarded-For header. 
+ """ + self._runtest({}, "127.0.0.1", {}) + + def _runtest(self, headers, expected_ip, make_request_args): + device_id = "bleb" + + body = json.dumps( + { + "type": "m.login.password", + "user": "bob", + "password": "abc123", + "device_id": device_id, + } + ) + request, channel = self.make_request( + "POST", "/_matrix/client/r0/login", body.encode('utf8'), **make_request_args + ) + self.render(request) + self.assertEqual(channel.code, 200) + access_token = channel.json_body["access_token"].encode('ascii') + + # Advance to a known time + self.reactor.advance(123456 - self.reactor.seconds()) + + request, channel = self.make_request( + "GET", + "/_matrix/client/r0/admin/users/" + self.user_id, + body.encode('utf8'), + access_token=access_token, + **make_request_args + ) + request.requestHeaders.addRawHeader(b"User-Agent", b"Mozzila pizza") + + # Add the optional headers + for h, v in headers.items(): + request.requestHeaders.addRawHeader(h, v) + self.render(request) + + # Advance so the save loop occurs + self.reactor.advance(100) + + result = self.get_success( + self.store.get_last_client_ip_by_device(self.user_id, device_id) + ) + r = result[(self.user_id, device_id)] + self.assertDictContainsSubset( + { + "user_id": self.user_id, + "device_id": device_id, + "ip": expected_ip, + "user_agent": "Mozzila pizza", + "last_seen": 123456100, + }, + r, + ) diff --git a/tests/unittest.py b/tests/unittest.py index 56f3dca394..ef905e6389 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -26,6 +26,7 @@ from twisted.internet.defer import Deferred from twisted.trial import unittest from synapse.http.server import JsonResource +from synapse.http.site import SynapseRequest from synapse.server import HomeServer from synapse.types import UserID, create_requester from synapse.util.logcontext import LoggingContextFilter @@ -237,7 +238,9 @@ class HomeserverTestCase(TestCase): Function to optionally be overridden in subclasses. """ - def make_request(self, method, path, content=b""): + def make_request( + self, method, path, content=b"", access_token=None, request=SynapseRequest + ): """ Create a SynapseRequest at the path using the method and containing the given content. @@ -255,7 +258,7 @@ class HomeserverTestCase(TestCase): if isinstance(content, dict): content = json.dumps(content).encode('utf8') - return make_request(method, path, content) + return make_request(method, path, content, access_token, request) def render(self, request): """ diff --git a/tests/utils.py b/tests/utils.py index 215226debf..aaed1149c3 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -16,7 +16,9 @@ import atexit import hashlib import os +import time import uuid +import warnings from inspect import getcallargs from mock import Mock, patch @@ -237,20 +239,41 @@ def setup_test_homeserver( else: # We need to do cleanup on PostgreSQL def cleanup(): + import psycopg2 + # Close all the db pools hs.get_db_pool().close() + dropped = False + # Drop the test database db_conn = db_engine.module.connect( database=POSTGRES_BASE_DB, user=POSTGRES_USER ) db_conn.autocommit = True cur = db_conn.cursor() - cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,)) - db_conn.commit() + + # Try a few times to drop the DB. Some things may hold on to the + # database for a few more seconds due to flakiness, preventing + # us from dropping it when the test is over. If we can't drop + # it, warn and move on. 
+ for x in range(5): + try: + cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,)) + db_conn.commit() + dropped = True + except psycopg2.OperationalError as e: + warnings.warn( + "Couldn't drop old db: " + str(e), category=UserWarning + ) + time.sleep(0.5) + cur.close() db_conn.close() + if not dropped: + warnings.warn("Failed to drop old DB.", category=UserWarning) + if not LEAVE_DB: # Register the cleanup hook cleanup_func(cleanup) diff --git a/tox.ini b/tox.ini index 88875c3318..e4db563b4b 100644 --- a/tox.ini +++ b/tox.ini @@ -70,6 +70,16 @@ usedevelop=true [testenv:py36] usedevelop=true +[testenv:py36-postgres] +usedevelop=true +deps = + {[base]deps} + psycopg2 +setenv = + {[base]setenv} + SYNAPSE_POSTGRES = 1 + + [testenv:packaging] deps = check-manifest -- cgit 1.4.1 From 703de4ec13b95f0c8f2e04bf8c20b3906b010592 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 20 Sep 2018 13:06:55 +0100 Subject: Comments and interface cleanup for on_receive_pdu Add some informative comments about what's going on here. Also, `sent_to_us_directly` and `get_missing` were doing the same thing (apart from in `_handle_queued_pdus`, which looks like a bug), so let's get rid of `get_missing` and use `sent_to_us_directly` consistently. --- synapse/federation/federation_server.py | 2 +- synapse/handlers/federation.py | 69 ++++++++++++++++++++++----------- 2 files changed, 47 insertions(+), 24 deletions(-) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index dbee404ea7..9a571e4fc7 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -618,7 +618,7 @@ class FederationServer(FederationBase): ) yield self.handler.on_receive_pdu( - origin, pdu, get_missing=True, sent_to_us_directly=True, + origin, pdu, sent_to_us_directly=True, ) def __str__(self): diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 8d6bd7976d..2ccdc3bfa7 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -136,7 +136,7 @@ class FederationHandler(BaseHandler): @defer.inlineCallbacks def on_receive_pdu( - self, origin, pdu, get_missing=True, sent_to_us_directly=False, + self, origin, pdu, sent_to_us_directly=False, ): """ Process a PDU received via a federation /send/ transaction, or via backfill of missing prev_events @@ -145,7 +145,8 @@ class FederationHandler(BaseHandler): origin (str): server which initiated the /send/ transaction. Will be used to fetch missing events or state. pdu (FrozenEvent): received PDU - get_missing (bool): True if we should fetch missing prev_events + sent_to_us_directly (bool): True if this event was pushed to us; False if + we pulled it as the result of a missing prev_event. Returns (Deferred): completes with None """ @@ -250,7 +251,7 @@ class FederationHandler(BaseHandler): pdu.internal_metadata.outlier = True elif min_depth and pdu.depth > min_depth: missing_prevs = prevs - seen - if get_missing and missing_prevs: + if sent_to_us_directly and missing_prevs: # If we're missing stuff, ensure we only fetch stuff one # at a time. 
logger.info( @@ -282,24 +283,46 @@ class FederationHandler(BaseHandler): room_id, event_id, len(missing_prevs), shortstr(missing_prevs), ) - if sent_to_us_directly and prevs - seen: - # If they have sent it to us directly, and the server - # isn't telling us about the auth events that it's - # made a message referencing, we explode - logger.warn( - "[%s %s] Failed to fetch %d prev events: rejecting", - room_id, event_id, len(prevs - seen), - ) - raise FederationError( - "ERROR", - 403, - ( - "Your server isn't divulging details about prev_events " - "referenced in this event." - ), - affected=pdu.event_id, - ) - elif prevs - seen: + if prevs - seen: + # We've still not been able to get all of the prev_events for this event. + # + # In this case, we need to fall back to asking another server in the + # federation for the state at this event. That's ok provided we then + # resolve the state against other bits of the DAG before using it (which + # will ensure that you can't just take over a room by sending an event, + # withholding its prev_events, and declaring yourself to be an admin in + # the subsequent state request). + # + # Now, if we're pulling this event as a missing prev_event, then clearly + # this event is not going to become the only forward-extremity and we are + # guaranteed to resolve its state against our existing forward + # extremities, so that should be fine. + # + # On the other hand, if this event was pushed to us, it is possible for + # it to become the only forward-extremity in the room, and we would then + # trust its state to be the state for the whole room. This is very bad. + # Further, if the event was pushed to us, there is no excuse for us not to + # have all the prev_events. We therefore reject any such events. + # + # XXX this really feels like it could/should be merged with the above, + # but there is an interaction with min_depth that I'm not really + # following. + + if sent_to_us_directly: + logger.warn( + "[%s %s] Failed to fetch %d prev events: rejecting", + room_id, event_id, len(prevs - seen), + ) + raise FederationError( + "ERROR", + 403, + ( + "Your server isn't divulging details about prev_events " + "referenced in this event." + ), + affected=pdu.event_id, + ) + # Calculate the state of the previous events, and # de-conflict them to find the current state. 
            state_groups = []
@@ -464,7 +487,7 @@ class FederationHandler(BaseHandler):
                 yield self.on_receive_pdu(
                     origin,
                     ev,
-                    get_missing=False
+                    sent_to_us_directly=False,
                 )
             except FederationError as e:
                 if e.code == 403:
@@ -1112,7 +1135,7 @@ class FederationHandler(BaseHandler):
             try:
                 logger.info("Processing queued PDU %s which was received "
                             "while we were joining %s", p.event_id, p.room_id)
-                yield self.on_receive_pdu(origin, p)
+                yield self.on_receive_pdu(origin, p, sent_to_us_directly=True)
             except Exception as e:
                 logger.warn(
                     "Error handling queued PDU %s from %s: %s",
-- cgit 1.4.1


From 6bb9a4b1d6666ac918b3809158c3210bde630671 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Thu, 20 Sep 2018 13:15:20 +0100
Subject: changelog

---
 changelog.d/3924.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/3924.misc

diff --git a/changelog.d/3924.misc b/changelog.d/3924.misc
new file mode 100644
index 0000000000..8d010e0bd9
--- /dev/null
+++ b/changelog.d/3924.misc
@@ -0,0 +1 @@
+Comments and interface cleanup for on_receive_pdu
\ No newline at end of file
-- cgit 1.4.1


From b28a7ed5030148139af6c6c03e635b068a6eae15 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 20 Sep 2018 13:44:20 +0100
Subject: Fix spurious exceptions when client closes connection

If an HTTP handler throws an exception while processing a request we
automatically write a JSON error response. If the handler had already
started writing a response, Twisted throws an exception.

We should check for this case and simply abort the connection if there
was an error after the response had started being written.
---
 synapse/http/server.py | 49 +++++++++++++++++++++++++++++++++++--------------
 1 file changed, 35 insertions(+), 14 deletions(-)

diff --git a/synapse/http/server.py b/synapse/http/server.py
index 2d5c23e673..b4b25cab19 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -84,10 +84,21 @@ def wrap_json_request_handler(h):
             logger.info(
                 "%s SynapseError: %s - %s", request, code, e.msg
             )
-            respond_with_json(
-                request, code, e.error_dict(), send_cors=True,
-                pretty_print=_request_user_agent_is_curl(request),
-            )
+
+            # Only respond with an error response if we haven't already started
+            # writing, otherwise lets just kill the connection
+            if request.startedWriting:
+                if request.transport:
+                    try:
+                        request.transport.abortConnection()
+                    except Exception:
+                        # abortConnection throws if the connection is already closed
+                        pass
+            else:
+                respond_with_json(
+                    request, code, e.error_dict(), send_cors=True,
+                    pretty_print=_request_user_agent_is_curl(request),
+                )
         except Exception:
             # failure.Failure() fishes the original Failure out
@@ -100,16 +111,26 @@ def wrap_json_request_handler(h):
                 request,
                 f.getTraceback().rstrip(),
             )
-            respond_with_json(
-                request,
-                500,
-                {
-                    "error": "Internal server error",
-                    "errcode": Codes.UNKNOWN,
-                },
-                send_cors=True,
-                pretty_print=_request_user_agent_is_curl(request),
-            )
+            # Only respond with an error response if we haven't already started
+            # writing, otherwise lets just kill the connection
+            if request.startedWriting:
+                if request.transport:
+                    try:
+                        request.transport.abortConnection()
+                    except Exception:
+                        # abortConnection throws if the connection is already closed
+                        pass
+            else:
+                respond_with_json(
+                    request,
+                    500,
+                    {
+                        "error": "Internal server error",
+                        "errcode": Codes.UNKNOWN,
+                    },
+                    send_cors=True,
+                    pretty_print=_request_user_agent_is_curl(request),
+                )
     return wrap_async_request_handler(wrapped_request_handler)
-- cgit 1.4.1


From
13f6f1624b21aab82fc7f0edf90a61ce635b91be Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 20 Sep 2018 13:52:09 +0100 Subject: Newsfile --- changelog.d/3925.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3925.misc diff --git a/changelog.d/3925.misc b/changelog.d/3925.misc new file mode 100644 index 0000000000..3e41f78ff5 --- /dev/null +++ b/changelog.d/3925.misc @@ -0,0 +1 @@ +Fix spurious exceptions when remote http client closes conncetion -- cgit 1.4.1 From 8dfb33d325a7b231668973338894d08f0d5436c9 Mon Sep 17 00:00:00 2001 From: Jan Christian Grünhage Date: Wed, 19 Sep 2018 13:48:41 +0200 Subject: make python 3 work in the docker container --- .dockerignore | 1 + docker/Dockerfile | 4 +++- docker/start.py | 3 ++- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.dockerignore b/.dockerignore index 0180602e56..81e8f6d50a 100644 --- a/.dockerignore +++ b/.dockerignore @@ -2,6 +2,7 @@ Dockerfile .travis.yml .gitignore demo/etc +synctl tox.ini .git/* .tox/* diff --git a/docker/Dockerfile b/docker/Dockerfile index 20d3fe3bd8..ad0ae0b734 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,4 +1,5 @@ -FROM docker.io/python:2-alpine3.8 +ARG PYTHON_VERSION=2 +FROM docker.io/python:${PYTHON_VERSION}-alpine3.8 COPY . /synapse @@ -12,6 +13,7 @@ RUN apk add --no-cache --virtual .build_deps \ postgresql-dev \ zlib-dev \ && cd /synapse \ + && sed -i 's/\["synctl"\] + //' setup.py \ && apk add --no-cache --virtual .runtime_deps \ libffi \ libjpeg-turbo \ diff --git a/docker/start.py b/docker/start.py index 90e8b9c51a..346df8c87f 100755 --- a/docker/start.py +++ b/docker/start.py @@ -5,6 +5,7 @@ import os import sys import subprocess import glob +import codecs # Utility functions convert = lambda src, dst, environ: open(dst, "w").write(jinja2.Template(open(src).read()).render(**environ)) @@ -23,7 +24,7 @@ def generate_secrets(environ, secrets): with open(filename) as handle: value = handle.read() else: print("Generating a random secret for {}".format(name)) - value = os.urandom(32).encode("hex") + value = codecs.encode(os.urandom(32), "hex").decode() with open(filename, "w") as handle: handle.write(value) environ[secret] = value -- cgit 1.4.1 From 3af7a95a9d8b4cf16632e4b1b2c84ced89001f0c Mon Sep 17 00:00:00 2001 From: Jan Christian Grünhage Date: Wed, 19 Sep 2018 13:51:46 +0200 Subject: add changelog --- changelog.d/3911.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3911.misc diff --git a/changelog.d/3911.misc b/changelog.d/3911.misc new file mode 100644 index 0000000000..e31311d520 --- /dev/null +++ b/changelog.d/3911.misc @@ -0,0 +1 @@ +Fix the docker image building on python 3 -- cgit 1.4.1 From 9ea408441f527064aeafb5a7076a10a15f2c580c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 20 Sep 2018 16:15:21 +0100 Subject: Handle exceptions thrown by background tasks Fixes #3921 --- synapse/metrics/background_process_metrics.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 167167be0a..e5293587e3 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -191,6 +191,8 @@ def run_as_background_process(desc, func, *args, **kwargs): try: yield func(*args, **kwargs) + except Exception: + logger.exception("Background process '%s' threw an exception", desc) finally: proc.update_metrics() -- cgit 1.4.1 From 168491c412775918b3c02dc8b10c4e3b7a50b674 Mon Sep 17 00:00:00 
2001 From: Erik Johnston Date: Thu, 20 Sep 2018 16:16:52 +0100 Subject: Newsfile --- changelog.d/3927.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3927.misc diff --git a/changelog.d/3927.misc b/changelog.d/3927.misc new file mode 100644 index 0000000000..4bd8e25f67 --- /dev/null +++ b/changelog.d/3927.misc @@ -0,0 +1 @@ +Log exceptions thrown by background tasks -- cgit 1.4.1 From 94ae1dea3cf746ba5f26b1ab57f945cb2e734ba1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 20 Sep 2018 17:05:34 +0100 Subject: Add missing logger --- synapse/metrics/background_process_metrics.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index e5293587e3..173908299c 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging import threading import six @@ -23,6 +24,9 @@ from twisted.internet import defer from synapse.util.logcontext import LoggingContext, PreserveLoggingContext +logger = logging.getLogger(__name__) + + _background_process_start_count = Counter( "synapse_background_process_start_count", "Number of background processes started", -- cgit 1.4.1 From a2ddaa90f21074a659abde45b712ab39324e97e4 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Thu, 20 Sep 2018 21:21:54 +0100 Subject: Always LL ourselves if we're in a room to simplify clients (#3916) Should fix https://github.com/vector-im/riot-web/issues/7209 --- changelog.d/3916.feature | 1 + synapse/handlers/sync.py | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 changelog.d/3916.feature diff --git a/changelog.d/3916.feature b/changelog.d/3916.feature new file mode 100644 index 0000000000..13282d992b --- /dev/null +++ b/changelog.d/3916.feature @@ -0,0 +1 @@ +Always LL ourselves if we're in a room diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 9bca4e7067..b598916b21 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -713,6 +713,10 @@ class SyncHandler(object): ) ] + # always make sure we LL ourselves so we know we're in the room + # (if we are), to fix https://github.com/vector-im/riot-web/issues/7209 + types.append((EventTypes.Member, sync_config.user.to_string())) + # only apply the filtering to room members filtered_types = [EventTypes.Member] -- cgit 1.4.1 From 8601c24287c452c0d9a803b130e0f68cf6169f6a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 21 Sep 2018 14:19:46 +0100 Subject: Fix some instances of ExpiringCache not expiring cache items ExpiringCache required that `start()` be called before it would actually start expiring entries. A number of places didn't do that. This PR removes `start` from ExpiringCache, and automatically starts backround reaping process on creation instead. 
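For concreteness, here is a minimal standalone sketch of the pattern this commit moves to: an expiring cache that schedules its own reaping loop from the constructor, so there is no separate start() for a call site to forget. The class name, the threading.Timer-based loop and the interval are assumptions made for the sketch; the real ExpiringCache drives its loop through Synapse's clock and background-process machinery instead.

    import threading
    import time
    from collections import OrderedDict


    class SelfStartingExpiringCache(object):
        """Toy expiring cache whose reaper starts itself on construction."""

        def __init__(self, expiry_ms, prune_interval_ms=30 * 1000):
            self._expiry_ms = expiry_ms
            self._prune_interval_ms = prune_interval_ms
            self._cache = OrderedDict()  # key -> (inserted_at_ms, value)
            self._lock = threading.Lock()
            if self._expiry_ms:
                # scheduled here in __init__: callers can no longer create a
                # cache that silently never expires anything
                self._schedule_prune()

        def _schedule_prune(self):
            timer = threading.Timer(self._prune_interval_ms / 1000.0, self._prune)
            timer.daemon = True
            timer.start()

        def _prune(self):
            now_ms = time.time() * 1000
            with self._lock:
                expired = [
                    key for key, (ts, _) in list(self._cache.items())
                    if now_ms - ts > self._expiry_ms
                ]
                for key in expired:
                    del self._cache[key]
            self._schedule_prune()  # re-arm for the next sweep

        def __setitem__(self, key, value):
            with self._lock:
                self._cache[key] = (time.time() * 1000, value)

        def get(self, key, default=None):
            with self._lock:
                entry = self._cache.get(key)
                return entry[1] if entry is not None else default

The cost is that every construction arms the reaper, which is exactly what makes it safe for the diff below to delete all the start_caching()/start() call sites in one sweep.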
--- synapse/app/appservice.py | 1 - synapse/app/client_reader.py | 1 - synapse/app/event_creator.py | 1 - synapse/app/federation_reader.py | 1 - synapse/app/federation_sender.py | 1 - synapse/app/frontend_proxy.py | 1 - synapse/app/homeserver.py | 1 - synapse/app/media_repository.py | 1 - synapse/app/pusher.py | 1 - synapse/app/synchrotron.py | 1 - synapse/app/user_dir.py | 1 - synapse/federation/federation_client.py | 28 ++++++++++++--------------- synapse/rest/media/v1/preview_url_resource.py | 1 - synapse/state/__init__.py | 9 --------- synapse/util/caches/expiringcache.py | 1 - tests/util/test_expiring_cache.py | 1 - 16 files changed, 12 insertions(+), 39 deletions(-) diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index 86b5067400..02039f7e79 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -172,7 +172,6 @@ def start(config_options): def start(): ps.get_datastore().start_profiling() - ps.get_state_handler().start_caching() reactor.callWhenRunning(start) diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index ce2b113dbb..4c73c637bb 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -181,7 +181,6 @@ def start(config_options): ss.start_listening(config.worker_listeners) def start(): - ss.get_state_handler().start_caching() ss.get_datastore().start_profiling() reactor.callWhenRunning(start) diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index f98e456ea0..bc82197b2a 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -199,7 +199,6 @@ def start(config_options): ss.start_listening(config.worker_listeners) def start(): - ss.get_state_handler().start_caching() ss.get_datastore().start_profiling() reactor.callWhenRunning(start) diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 60f5973505..18ca71ef99 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -168,7 +168,6 @@ def start(config_options): ss.start_listening(config.worker_listeners) def start(): - ss.get_state_handler().start_caching() ss.get_datastore().start_profiling() reactor.callWhenRunning(start) diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index 60dd09aac3..6501c57792 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -201,7 +201,6 @@ def start(config_options): def start(): ps.get_datastore().start_profiling() - ps.get_state_handler().start_caching() reactor.callWhenRunning(start) _base.start_worker_reactor("synapse-federation-sender", config) diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 8c0b9c67b0..b076fbe522 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -258,7 +258,6 @@ def start(config_options): ss.start_listening(config.worker_listeners) def start(): - ss.get_state_handler().start_caching() ss.get_datastore().start_profiling() reactor.callWhenRunning(start) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 3241ded188..8c5d858b0b 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -384,7 +384,6 @@ def setup(config_options): def start(): hs.get_pusherpool().start() - hs.get_state_handler().start_caching() hs.get_datastore().start_profiling() hs.get_datastore().start_doing_background_updates() hs.get_federation_client().start_get_pdu_cache() diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index 
e3dbb3b4e6..992d182dba 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -168,7 +168,6 @@ def start(config_options): ss.start_listening(config.worker_listeners) def start(): - ss.get_state_handler().start_caching() ss.get_datastore().start_profiling() reactor.callWhenRunning(start) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 244c604de9..2ec4c7defb 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -228,7 +228,6 @@ def start(config_options): def start(): ps.get_pusherpool().start() ps.get_datastore().start_profiling() - ps.get_state_handler().start_caching() reactor.callWhenRunning(start) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 6662340797..df81b7bcbe 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -435,7 +435,6 @@ def start(config_options): def start(): ss.get_datastore().start_profiling() - ss.get_state_handler().start_caching() reactor.callWhenRunning(start) diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index 96ffcaf073..b383e79c1c 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -229,7 +229,6 @@ def start(config_options): def start(): ps.get_datastore().start_profiling() - ps.get_state_handler().start_caching() reactor.callWhenRunning(start) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index fe67b2ff42..5a92428f56 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -66,6 +66,14 @@ class FederationClient(FederationBase): self.state = hs.get_state_handler() self.transport_layer = hs.get_federation_transport_client() + self._get_pdu_cache = ExpiringCache( + cache_name="get_pdu_cache", + clock=self._clock, + max_len=1000, + expiry_ms=120 * 1000, + reset_expiry_on_get=False, + ) + def _clear_tried_cache(self): """Clear pdu_destination_tried cache""" now = self._clock.time_msec() @@ -82,17 +90,6 @@ class FederationClient(FederationBase): if destination_dict: self.pdu_destination_tried[event_id] = destination_dict - def start_get_pdu_cache(self): - self._get_pdu_cache = ExpiringCache( - cache_name="get_pdu_cache", - clock=self._clock, - max_len=1000, - expiry_ms=120 * 1000, - reset_expiry_on_get=False, - ) - - self._get_pdu_cache.start() - @log_function def make_query(self, destination, query_type, args, retry_on_dns_fail=False, ignore_backoff=False): @@ -229,10 +226,9 @@ class FederationClient(FederationBase): # TODO: Rate limit the number of times we try and get the same event. 
- if self._get_pdu_cache: - ev = self._get_pdu_cache.get(event_id) - if ev: - defer.returnValue(ev) + ev = self._get_pdu_cache.get(event_id) + if ev: + defer.returnValue(ev) pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {}) @@ -285,7 +281,7 @@ class FederationClient(FederationBase): ) continue - if self._get_pdu_cache is not None and signed_pdu: + if signed_pdu: self._get_pdu_cache[event_id] = signed_pdu defer.returnValue(signed_pdu) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index cad2dec33a..af01040a38 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -79,7 +79,6 @@ class PreviewUrlResource(Resource): # don't spider URLs more often than once an hour expiry_ms=60 * 60 * 1000, ) - self._cache.start() self._cleaner_loop = self.clock.looping_call( self._start_expire_url_cache_data, 10 * 1000, diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index d7ae22a661..b22495c1f9 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -95,10 +95,6 @@ class StateHandler(object): self.hs = hs self._state_resolution_handler = hs.get_state_resolution_handler() - def start_caching(self): - # TODO: remove this shim - self._state_resolution_handler.start_caching() - @defer.inlineCallbacks def get_current_state(self, room_id, event_type=None, state_key="", latest_event_ids=None): @@ -428,9 +424,6 @@ class StateResolutionHandler(object): self._state_cache = None self.resolve_linearizer = Linearizer(name="state_resolve_lock") - def start_caching(self): - logger.debug("start_caching") - self._state_cache = ExpiringCache( cache_name="state_cache", clock=self.clock, @@ -440,8 +433,6 @@ class StateResolutionHandler(object): reset_expiry_on_get=True, ) - self._state_cache.start() - @defer.inlineCallbacks @log_function def resolve_state_groups( diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index ce85b2ae11..921a9c5b29 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -58,7 +58,6 @@ class ExpiringCache(object): self.metrics = register_cache("expiring", cache_name, self) - def start(self): if not self._expiry_ms: # Don't bother starting the loop if things never expire return diff --git a/tests/util/test_expiring_cache.py b/tests/util/test_expiring_cache.py index 5cbada4eda..50bc7702d2 100644 --- a/tests/util/test_expiring_cache.py +++ b/tests/util/test_expiring_cache.py @@ -65,7 +65,6 @@ class ExpiringCacheTestCase(unittest.TestCase): def test_time_eviction(self): clock = MockClock() cache = ExpiringCache("test", clock, expiry_ms=1000) - cache.start() cache["key"] = 1 clock.advance_time(0.5) -- cgit 1.4.1 From d3f80cbc9c157303484ae6aac70c60ed2bbec19d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 21 Sep 2018 14:24:39 +0100 Subject: Newsfile --- changelog.d/3932.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3932.bugfix diff --git a/changelog.d/3932.bugfix b/changelog.d/3932.bugfix new file mode 100644 index 0000000000..7578414ede --- /dev/null +++ b/changelog.d/3932.bugfix @@ -0,0 +1 @@ +Fix some instances of ExpiringCache not expiring cache items -- cgit 1.4.1 From 79eded1ae4f0d341918f5c58076f1e60cd400e8a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 21 Sep 2018 14:52:21 +0100 Subject: Make ExpiringCache slightly more performant --- synapse/util/caches/expiringcache.py | 6 +++++- 1 file changed, 5 
insertions(+), 1 deletion(-) diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 921a9c5b29..48ca2d634d 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -16,6 +16,8 @@ import logging from collections import OrderedDict +from six import iteritems + from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.caches import register_cache @@ -127,7 +129,7 @@ class ExpiringCache(object): keys_to_delete = set() - for key, cache_entry in self._cache.items(): + for key, cache_entry in iteritems(self._cache): if now - cache_entry.time > self._expiry_ms: keys_to_delete.add(key) @@ -149,6 +151,8 @@ class ExpiringCache(object): class _CacheEntry(object): + __slots__ = ["time", "value"] + def __init__(self, time, value): self.time = time self.value = value -- cgit 1.4.1 From fdd1a62e8d09ddccbe685fe7d7840990a9c06241 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 21 Sep 2018 14:55:47 +0100 Subject: Add a five minute cache to get_destination_retry_timings Hopefully helps with #3931 --- synapse/storage/transactions.py | 23 ++++++++++++++++++++++- synapse/util/caches/expiringcache.py | 13 +++++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index baf0379a68..ab54977a75 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -23,6 +23,7 @@ from canonicaljson import encode_canonical_json from twisted.internet import defer from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.util.caches.expiringcache import ExpiringCache from ._base import SQLBaseStore, db_to_json @@ -49,6 +50,8 @@ _UpdateTransactionRow = namedtuple( ) ) +SENTINEL = object() + class TransactionStore(SQLBaseStore): """A collection of queries for handling PDUs. @@ -59,6 +62,12 @@ class TransactionStore(SQLBaseStore): self._clock.looping_call(self._start_cleanup_transactions, 30 * 60 * 1000) + self._destination_retry_cache = ExpiringCache( + cache_name="get_destination_retry_timings", + clock=self._clock, + expiry_ms=5 * 60 * 1000, + ) + def get_received_txn_response(self, transaction_id, origin): """For an incoming transaction from a given origin, check if we have already responded to it. If so, return the response code and response @@ -155,6 +164,7 @@ class TransactionStore(SQLBaseStore): """ pass + @defer.inlineCallbacks def get_destination_retry_timings(self, destination): """Gets the current retry timings (if any) for a given destination. @@ -165,10 +175,20 @@ class TransactionStore(SQLBaseStore): None if not retrying Otherwise a dict for the retry scheme """ - return self.runInteraction( + + result = self._destination_retry_cache.get(destination, SENTINEL) + if result is not SENTINEL: + defer.returnValue(result) + + result = yield self.runInteraction( "get_destination_retry_timings", self._get_destination_retry_timings, destination) + # We don't hugely care about race conditions between getting and + # invalidating the cache, since we time out fairly quickly anyway. 
+ self._destination_retry_cache[destination] = result + defer.returnValue(result) + def _get_destination_retry_timings(self, txn, destination): result = self._simple_select_one_txn( txn, @@ -196,6 +216,7 @@ class TransactionStore(SQLBaseStore): retry_interval (int) - how long until next retry in ms """ + self._destination_retry_cache.pop(destination) return self.runInteraction( "set_destination_retry_timings", self._set_destination_retry_timings, diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 48ca2d634d..346669e4ce 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -24,6 +24,9 @@ from synapse.util.caches import register_cache logger = logging.getLogger(__name__) +SENTINEL = object() + + class ExpiringCache(object): def __init__(self, cache_name, clock, max_len=0, expiry_ms=0, reset_expiry_on_get=False, iterable=False): @@ -102,6 +105,16 @@ class ExpiringCache(object): return entry.value + def pop(self, key, default=None): + value = self._cache.pop(key, SENTINEL) + if value is SENTINEL: + return default + + if self.iterable: + self._size_estimate -= len(value.value) + + return value + def __contains__(self, key): return key in self._cache -- cgit 1.4.1 From 5230bc147159d6ffbe04d27e695064cac44166d2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 21 Sep 2018 15:05:37 +0100 Subject: Newsfile --- changelog.d/3933.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3933.misc diff --git a/changelog.d/3933.misc b/changelog.d/3933.misc new file mode 100644 index 0000000000..6545871f55 --- /dev/null +++ b/changelog.d/3933.misc @@ -0,0 +1 @@ +Add a cache to get_destination_retry_timings -- cgit 1.4.1 From 19dc676d1aaf66a45c9a1810132952f7ae3ca2aa Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 21 Sep 2018 16:25:42 +0100 Subject: Fix ExpiringCache.__len__ to be accurate It used to try and produce an estimate, which was sometimes negative. This caused metrics to be sad, so lets always just calculate it from scratch. 
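The drift is easy to reproduce in isolation. A toy sketch (hypothetical variable names) of why an incrementally maintained size estimate can go wrong while a from-scratch sum cannot:

    from collections import OrderedDict

    cache = OrderedDict()
    size_estimate = 0

    cache["a"] = [1, 2, 3]
    size_estimate += len(cache["a"])
    cache["b"] = [4, 5]
    size_estimate += len(cache["b"])

    # Incremental bookkeeping has to be mirrored on *every* removal path
    # (eviction, pop, prune); miss one and the counter drifts, and with
    # enough missed decrements it can even go negative.
    del cache["a"]  # no matching "size_estimate -= 3"

    assert size_estimate == 5  # stale
    assert sum(len(v) for v in cache.values()) == 2  # recomputed, correct

Recomputing is O(n) per __len__ call, but for a metric read at scrape time that is a fair price for a value that cannot drift.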
--- synapse/util/caches/expiringcache.py | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 346669e4ce..8ac56d85ff 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -16,7 +16,7 @@ import logging from collections import OrderedDict -from six import iteritems +from six import iteritems, itervalues from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.caches import register_cache @@ -59,8 +59,6 @@ class ExpiringCache(object): self.iterable = iterable - self._size_estimate = 0 - self.metrics = register_cache("expiring", cache_name, self) if not self._expiry_ms: @@ -79,16 +77,11 @@ class ExpiringCache(object): now = self._clock.time_msec() self._cache[key] = _CacheEntry(now, value) - if self.iterable: - self._size_estimate += len(value) - # Evict if there are now too many items while self._max_len and len(self) > self._max_len: _key, value = self._cache.popitem(last=False) if self.iterable: - removed_len = len(value.value) - self.metrics.inc_evictions(removed_len) - self._size_estimate -= removed_len + self.metrics.inc_evictions(len(value.value)) else: self.metrics.inc_evictions() @@ -111,7 +104,9 @@ class ExpiringCache(object): return default if self.iterable: - self._size_estimate -= len(value.value) + self.metrics.inc_evictions(len(value.value)) + else: + self.metrics.inc_evictions() return value @@ -149,7 +144,9 @@ class ExpiringCache(object): for k in keys_to_delete: value = self._cache.pop(k) if self.iterable: - self._size_estimate -= len(value.value) + self.metrics.inc_evictions(len(value.value)) + else: + self.metrics.inc_evictions() logger.debug( "[%s] _prune_cache before: %d, after len: %d", @@ -158,7 +155,7 @@ class ExpiringCache(object): def __len__(self): if self.iterable: - return self._size_estimate + return sum(len(entry.value) for entry in itervalues(self._cache)) else: return len(self._cache) -- cgit 1.4.1 From e302f40e20df3b2fdb1a3fd18abd4164455e8ac7 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 24 Sep 2018 23:40:05 +1000 Subject: update version --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index f6a194dfc2..58244a5dd4 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.33.5rc1" +__version__ = "0.33.5" -- cgit 1.4.1 From e3aa2c0b75b062efc78388abba0f8b49666d6c54 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 24 Sep 2018 23:40:27 +1000 Subject: towncrier --- CHANGES.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 00dbed7021..58fb9acdda 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 0.33.5 (2018-09-24) +=========================== + +No significant changes. + + Synapse 0.33.5rc1 (2018-09-17) ============================== -- cgit 1.4.1 From 7d3f639844f50061367db25cac0bd4d189648678 Mon Sep 17 00:00:00 2001 From: Oleg Girko Date: Mon, 24 Sep 2018 16:51:59 +0100 Subject: Fix compatibility issue with older Twisted in tests. Older Twisted (18.4.0) returns TimeoutError instead of ConnectingCancelledError when connection times out. This change allows tests to be compatible with this behaviour. 
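The fix below leans on assertIsInstance accepting a tuple of classes, just as isinstance does, so one assertion can cover both Twisted behaviours. A standalone illustration with stand-in exception types (hypothetical names, not Twisted's):

    import unittest


    class ConnectingCancelled(Exception):
        """Stand-in for the error newer Twisted raises."""


    class Timeout(Exception):
        """Stand-in for the TimeoutError older Twisted (18.4.0) raises."""


    class CompatTestCase(unittest.TestCase):
        def test_accepts_either_exception(self):
            err = Timeout()
            # a tuple of types makes the test pass on both Twisted versions
            self.assertIsInstance(err, (ConnectingCancelled, Timeout))


    if __name__ == "__main__":
        unittest.main()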
Signed-off-by: Oleg Girko --- tests/http/test_fedclient.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py index 1c46c9cfeb..d45d2bcb16 100644 --- a/tests/http/test_fedclient.py +++ b/tests/http/test_fedclient.py @@ -49,7 +49,7 @@ class FederationClientTests(HomeserverTestCase): def test_client_never_connect(self): """ If the HTTP request is not connected and is timed out, it'll give a - ConnectingCancelledError. + ConnectingCancelledError or TimeoutError. """ d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000) @@ -71,7 +71,7 @@ class FederationClientTests(HomeserverTestCase): self.reactor.advance(10.5) f = self.failureResultOf(d) - self.assertIsInstance(f.value, ConnectingCancelledError) + self.assertIsInstance(f.value, (ConnectingCancelledError, TimeoutError)) def test_client_connect_no_response(self): """ -- cgit 1.4.1 From e37c221b9792caf5a451079d9ce995609fa624f9 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 25 Sep 2018 02:51:55 +1000 Subject: changelog for 3940 --- changelog.d/3940.misc | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 changelog.d/3940.misc diff --git a/changelog.d/3940.misc b/changelog.d/3940.misc new file mode 100644 index 0000000000..a39d2bde3e --- /dev/null +++ b/changelog.d/3940.misc @@ -0,0 +1,2 @@ +Fix incompatibility with older Twisted version in tests. Thanks +@OlegGirko! -- cgit 1.4.1 From 6b6cb32297b29a1dc6c415150b8a9a37b38349bf Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 25 Sep 2018 02:54:34 +1000 Subject: bump version --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index 58244a5dd4..b1f7a89fba 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.33.5" +__version__ = "0.33.5.1" -- cgit 1.4.1 From fc691ca97ca171b81faef83e116a2c11a6816e1b Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 25 Sep 2018 02:54:59 +1000 Subject: changelog --- CHANGES.md | 10 ++++++++++ changelog.d/3940.misc | 2 -- 2 files changed, 10 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/3940.misc diff --git a/CHANGES.md b/CHANGES.md index 58fb9acdda..45d3cdb131 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,13 @@ +Synapse 0.33.5.1 (2018-09-25) +============================= + +Internal Changes +---------------- + +- Fix incompatibility with older Twisted version in tests. Thanks + @OlegGirko! ([\#3940](https://github.com/matrix-org/synapse/issues/3940)) + + Synapse 0.33.5 (2018-09-24) =========================== diff --git a/changelog.d/3940.misc b/changelog.d/3940.misc deleted file mode 100644 index a39d2bde3e..0000000000 --- a/changelog.d/3940.misc +++ /dev/null @@ -1,2 +0,0 @@ -Fix incompatibility with older Twisted version in tests. Thanks -@OlegGirko! -- cgit 1.4.1 From 787d22ed6cbc8f78fed7e316d345e54b7c6a93da Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 25 Sep 2018 00:49:26 +0100 Subject: Only lazy load self-members on initial sync Given we have disabled lazy loading for incr syncs in #3840, we can make self-LL more efficient by only doing it on initial sync. Also adds a bounds check for if/when we change our mind, so that we don't try to include LL members on sync responses with no timeline. 
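A simplified sketch of the control flow the diff below arrives at (a hypothetical helper, not the real sync handler): the caller's own membership is appended to the state filter only on full-state, lazy-loading syncs, and the incremental path additionally requires a non-empty timeline batch before fetching member state.

    def build_member_state_types(user_id, types, full_state, lazy_load_members):
        """Return the (event_type, state_key) pairs to fetch as state."""
        types = list(types)
        if full_state and lazy_load_members:
            # LL ourselves so we know whether we're in the room (if we are);
            # incremental syncs skip this, since LL was disabled for them.
            types.append(("m.room.member", user_id))
        return types


    me = "@alice:example.com"
    full = build_member_state_types(me, [], True, True)
    incr = build_member_state_types(me, [], False, True)
    assert ("m.room.member", me) in full
    assert ("m.room.member", me) not in incr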
--- changelog.d/3936.bugfix | 1 + synapse/handlers/sync.py | 13 ++++++++----- 2 files changed, 9 insertions(+), 5 deletions(-) create mode 100644 changelog.d/3936.bugfix diff --git a/changelog.d/3936.bugfix b/changelog.d/3936.bugfix new file mode 100644 index 0000000000..49b02b9e27 --- /dev/null +++ b/changelog.d/3936.bugfix @@ -0,0 +1 @@ +Fix out-of-bounds error when LLing yourself diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index b598916b21..c7d69d9d80 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -713,10 +713,6 @@ class SyncHandler(object): ) ] - # always make sure we LL ourselves so we know we're in the room - # (if we are), to fix https://github.com/vector-im/riot-web/issues/7209 - types.append((EventTypes.Member, sync_config.user.to_string())) - # only apply the filtering to room members filtered_types = [EventTypes.Member] @@ -726,6 +722,13 @@ class SyncHandler(object): } if full_state: + if lazy_load_members: + # always make sure we LL ourselves so we know we're in the room + # (if we are) to fix https://github.com/vector-im/riot-web/issues/7209 + # We only need apply this on full state syncs given we disabled + # LL for incr syncs in #3840. + types.append((EventTypes.Member, sync_config.user.to_string())) + if batch: current_state_ids = yield self.store.get_state_ids_for_event( batch.events[-1].event_id, types=types, @@ -794,7 +797,7 @@ class SyncHandler(object): else: state_ids = {} if lazy_load_members: - if types: + if types and batch.events: # We're returning an incremental sync, with no # "gap" since the previous sync, so normally there would be # no state to return. -- cgit 1.4.1 From a9d84f4e44783bc75dd931093339636732bfbf6c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 25 Sep 2018 10:43:39 +0100 Subject: We require attrs 16.0.0 Ref: https://github.com/matrix-org/synapse/issues/3945 --- synapse/python_dependencies.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 0d8de600cf..c779f69fa0 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -58,7 +58,9 @@ REQUIREMENTS = { "phonenumbers>=8.2.0": ["phonenumbers"], "six": ["six"], "prometheus_client": ["prometheus_client"], - "attrs": ["attr"], + + # we use attr.s(slots), which arrived in 16.0.0 + "attrs>=16.0.0": ["attr>=16.0.0"], "netaddr>=0.7.18": ["netaddr"], } -- cgit 1.4.1 From 45a1053d447903aae9b75d9a86016e71034917b9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 25 Sep 2018 10:45:34 +0100 Subject: changelog --- changelog.d/3947.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3947.misc diff --git a/changelog.d/3947.misc b/changelog.d/3947.misc new file mode 100644 index 0000000000..5a9a22bed9 --- /dev/null +++ b/changelog.d/3947.misc @@ -0,0 +1 @@ +Require attrs 16.0.0 or later -- cgit 1.4.1 From c53336986d9208925438a8e2366bac7d1267a95b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 25 Sep 2018 11:19:27 +0100 Subject: Move synctl into top dir to avoid a symlink symlinks apparently break setuptools on python3 and alpine (https://bugs.python.org/issue31940), so let's stop using a symlink and just use the file directly. 
--- synapse/app/synctl.py | 284 ------------------------------------------------- synctl | 285 +++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 284 insertions(+), 285 deletions(-) delete mode 100755 synapse/app/synctl.py mode change 120000 => 100755 synctl diff --git a/synapse/app/synctl.py b/synapse/app/synctl.py deleted file mode 100755 index d658f967ba..0000000000 --- a/synapse/app/synctl.py +++ /dev/null @@ -1,284 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import argparse -import collections -import errno -import glob -import os -import os.path -import signal -import subprocess -import sys -import time - -from six import iteritems - -import yaml - -SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"] - -GREEN = "\x1b[1;32m" -YELLOW = "\x1b[1;33m" -RED = "\x1b[1;31m" -NORMAL = "\x1b[m" - - -def pid_running(pid): - try: - os.kill(pid, 0) - return True - except OSError as err: - if err.errno == errno.EPERM: - return True - return False - - -def write(message, colour=NORMAL, stream=sys.stdout): - if colour == NORMAL: - stream.write(message + "\n") - else: - stream.write(colour + message + NORMAL + "\n") - - -def abort(message, colour=RED, stream=sys.stderr): - write(message, colour, stream) - sys.exit(1) - - -def start(configfile): - write("Starting ...") - args = SYNAPSE - args.extend(["--daemonize", "-c", configfile]) - - try: - subprocess.check_call(args) - write("started synapse.app.homeserver(%r)" % - (configfile,), colour=GREEN) - except subprocess.CalledProcessError as e: - write( - "error starting (exit code: %d); see above for logs" % e.returncode, - colour=RED, - ) - - -def start_worker(app, configfile, worker_configfile): - args = [ - "python", "-B", - "-m", app, - "-c", configfile, - "-c", worker_configfile - ] - - try: - subprocess.check_call(args) - write("started %s(%r)" % (app, worker_configfile), colour=GREEN) - except subprocess.CalledProcessError as e: - write( - "error starting %s(%r) (exit code: %d); see above for logs" % ( - app, worker_configfile, e.returncode, - ), - colour=RED, - ) - - -def stop(pidfile, app): - if os.path.exists(pidfile): - pid = int(open(pidfile).read()) - try: - os.kill(pid, signal.SIGTERM) - write("stopped %s" % (app,), colour=GREEN) - except OSError as err: - if err.errno == errno.ESRCH: - write("%s not running" % (app,), colour=YELLOW) - elif err.errno == errno.EPERM: - abort("Cannot stop %s: Operation not permitted" % (app,)) - else: - abort("Cannot stop %s: Unknown error" % (app,)) - - -Worker = collections.namedtuple("Worker", [ - "app", "configfile", "pidfile", "cache_factor" -]) - - -def main(): - - parser = argparse.ArgumentParser() - - parser.add_argument( - "action", - choices=["start", "stop", "restart"], - help="whether to start, stop or restart the synapse", - ) - parser.add_argument( - "configfile", - nargs="?", - default="homeserver.yaml", - help="the homeserver config file, defaults to 
homeserver.yaml", - ) - parser.add_argument( - "-w", "--worker", - metavar="WORKERCONFIG", - help="start or stop a single worker", - ) - parser.add_argument( - "-a", "--all-processes", - metavar="WORKERCONFIGDIR", - help="start or stop all the workers in the given directory" - " and the main synapse process", - ) - - options = parser.parse_args() - - if options.worker and options.all_processes: - write( - 'Cannot use "--worker" with "--all-processes"', - stream=sys.stderr - ) - sys.exit(1) - - configfile = options.configfile - - if not os.path.exists(configfile): - write( - "No config file found\n" - "To generate a config file, run '%s -c %s --generate-config" - " --server-name='\n" % ( - " ".join(SYNAPSE), options.configfile - ), - stream=sys.stderr, - ) - sys.exit(1) - - with open(configfile) as stream: - config = yaml.load(stream) - - pidfile = config["pid_file"] - cache_factor = config.get("synctl_cache_factor") - start_stop_synapse = True - - if cache_factor: - os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor) - - cache_factors = config.get("synctl_cache_factors", {}) - for cache_name, factor in iteritems(cache_factors): - os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor) - - worker_configfiles = [] - if options.worker: - start_stop_synapse = False - worker_configfile = options.worker - if not os.path.exists(worker_configfile): - write( - "No worker config found at %r" % (worker_configfile,), - stream=sys.stderr, - ) - sys.exit(1) - worker_configfiles.append(worker_configfile) - - if options.all_processes: - # To start the main synapse with -a you need to add a worker file - # with worker_app == "synapse.app.homeserver" - start_stop_synapse = False - worker_configdir = options.all_processes - if not os.path.isdir(worker_configdir): - write( - "No worker config directory found at %r" % (worker_configdir,), - stream=sys.stderr, - ) - sys.exit(1) - worker_configfiles.extend(sorted(glob.glob( - os.path.join(worker_configdir, "*.yaml") - ))) - - workers = [] - for worker_configfile in worker_configfiles: - with open(worker_configfile) as stream: - worker_config = yaml.load(stream) - worker_app = worker_config["worker_app"] - if worker_app == "synapse.app.homeserver": - # We need to special case all of this to pick up options that may - # be set in the main config file or in this worker config file. - worker_pidfile = ( - worker_config.get("pid_file") - or pidfile - ) - worker_cache_factor = worker_config.get("synctl_cache_factor") or cache_factor - daemonize = worker_config.get("daemonize") or config.get("daemonize") - assert daemonize, "Main process must have daemonize set to true" - - # The master process doesn't support using worker_* config. 
- for key in worker_config: - if key == "worker_app": # But we allow worker_app - continue - assert not key.startswith("worker_"), \ - "Main process cannot use worker_* config" - else: - worker_pidfile = worker_config["worker_pid_file"] - worker_daemonize = worker_config["worker_daemonize"] - assert worker_daemonize, "In config %r: expected '%s' to be True" % ( - worker_configfile, "worker_daemonize") - worker_cache_factor = worker_config.get("synctl_cache_factor") - workers.append(Worker( - worker_app, worker_configfile, worker_pidfile, worker_cache_factor, - )) - - action = options.action - - if action == "stop" or action == "restart": - for worker in workers: - stop(worker.pidfile, worker.app) - - if start_stop_synapse: - stop(pidfile, "synapse.app.homeserver") - - # Wait for synapse to actually shutdown before starting it again - if action == "restart": - running_pids = [] - if start_stop_synapse and os.path.exists(pidfile): - running_pids.append(int(open(pidfile).read())) - for worker in workers: - if os.path.exists(worker.pidfile): - running_pids.append(int(open(worker.pidfile).read())) - if len(running_pids) > 0: - write("Waiting for process to exit before restarting...") - for running_pid in running_pids: - while pid_running(running_pid): - time.sleep(0.2) - write("All processes exited; now restarting...") - - if action == "start" or action == "restart": - if start_stop_synapse: - # Check if synapse is already running - if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())): - abort("synapse.app.homeserver already running") - start(configfile) - - for worker in workers: - if worker.cache_factor: - os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor) - - start_worker(worker.app, configfile, worker.configfile) - - if cache_factor: - os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor) - else: - os.environ.pop("SYNAPSE_CACHE_FACTOR", None) - - -if __name__ == "__main__": - main() diff --git a/synctl b/synctl deleted file mode 120000 index 1bdceda20a..0000000000 --- a/synctl +++ /dev/null @@ -1 +0,0 @@ -./synapse/app/synctl.py \ No newline at end of file diff --git a/synctl b/synctl new file mode 100755 index 0000000000..d658f967ba --- /dev/null +++ b/synctl @@ -0,0 +1,284 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2014-2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse +import collections +import errno +import glob +import os +import os.path +import signal +import subprocess +import sys +import time + +from six import iteritems + +import yaml + +SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"] + +GREEN = "\x1b[1;32m" +YELLOW = "\x1b[1;33m" +RED = "\x1b[1;31m" +NORMAL = "\x1b[m" + + +def pid_running(pid): + try: + os.kill(pid, 0) + return True + except OSError as err: + if err.errno == errno.EPERM: + return True + return False + + +def write(message, colour=NORMAL, stream=sys.stdout): + if colour == NORMAL: + stream.write(message + "\n") + else: + stream.write(colour + message + NORMAL + "\n") + + +def abort(message, colour=RED, stream=sys.stderr): + write(message, colour, stream) + sys.exit(1) + + +def start(configfile): + write("Starting ...") + args = SYNAPSE + args.extend(["--daemonize", "-c", configfile]) + + try: + subprocess.check_call(args) + write("started synapse.app.homeserver(%r)" % + (configfile,), colour=GREEN) + except subprocess.CalledProcessError as e: + write( + "error starting (exit code: %d); see above for logs" % e.returncode, + colour=RED, + ) + + +def start_worker(app, configfile, worker_configfile): + args = [ + "python", "-B", + "-m", app, + "-c", configfile, + "-c", worker_configfile + ] + + try: + subprocess.check_call(args) + write("started %s(%r)" % (app, worker_configfile), colour=GREEN) + except subprocess.CalledProcessError as e: + write( + "error starting %s(%r) (exit code: %d); see above for logs" % ( + app, worker_configfile, e.returncode, + ), + colour=RED, + ) + + +def stop(pidfile, app): + if os.path.exists(pidfile): + pid = int(open(pidfile).read()) + try: + os.kill(pid, signal.SIGTERM) + write("stopped %s" % (app,), colour=GREEN) + except OSError as err: + if err.errno == errno.ESRCH: + write("%s not running" % (app,), colour=YELLOW) + elif err.errno == errno.EPERM: + abort("Cannot stop %s: Operation not permitted" % (app,)) + else: + abort("Cannot stop %s: Unknown error" % (app,)) + + +Worker = collections.namedtuple("Worker", [ + "app", "configfile", "pidfile", "cache_factor" +]) + + +def main(): + + parser = argparse.ArgumentParser() + + parser.add_argument( + "action", + choices=["start", "stop", "restart"], + help="whether to start, stop or restart the synapse", + ) + parser.add_argument( + "configfile", + nargs="?", + default="homeserver.yaml", + help="the homeserver config file, defaults to homeserver.yaml", + ) + parser.add_argument( + "-w", "--worker", + metavar="WORKERCONFIG", + help="start or stop a single worker", + ) + parser.add_argument( + "-a", "--all-processes", + metavar="WORKERCONFIGDIR", + help="start or stop all the workers in the given directory" + " and the main synapse process", + ) + + options = parser.parse_args() + + if options.worker and options.all_processes: + write( + 'Cannot use "--worker" with "--all-processes"', + stream=sys.stderr + ) + sys.exit(1) + + configfile = options.configfile + + if not os.path.exists(configfile): + write( + "No config file found\n" + "To generate a config file, run '%s -c %s --generate-config" + " --server-name='\n" % ( + " ".join(SYNAPSE), options.configfile + ), + stream=sys.stderr, + ) + sys.exit(1) + + with open(configfile) as stream: + config = yaml.load(stream) + + pidfile = config["pid_file"] + cache_factor = config.get("synctl_cache_factor") + start_stop_synapse = True + + if cache_factor: + os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor) + + cache_factors = config.get("synctl_cache_factors", {}) + for 
cache_name, factor in iteritems(cache_factors): + os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor) + + worker_configfiles = [] + if options.worker: + start_stop_synapse = False + worker_configfile = options.worker + if not os.path.exists(worker_configfile): + write( + "No worker config found at %r" % (worker_configfile,), + stream=sys.stderr, + ) + sys.exit(1) + worker_configfiles.append(worker_configfile) + + if options.all_processes: + # To start the main synapse with -a you need to add a worker file + # with worker_app == "synapse.app.homeserver" + start_stop_synapse = False + worker_configdir = options.all_processes + if not os.path.isdir(worker_configdir): + write( + "No worker config directory found at %r" % (worker_configdir,), + stream=sys.stderr, + ) + sys.exit(1) + worker_configfiles.extend(sorted(glob.glob( + os.path.join(worker_configdir, "*.yaml") + ))) + + workers = [] + for worker_configfile in worker_configfiles: + with open(worker_configfile) as stream: + worker_config = yaml.load(stream) + worker_app = worker_config["worker_app"] + if worker_app == "synapse.app.homeserver": + # We need to special case all of this to pick up options that may + # be set in the main config file or in this worker config file. + worker_pidfile = ( + worker_config.get("pid_file") + or pidfile + ) + worker_cache_factor = worker_config.get("synctl_cache_factor") or cache_factor + daemonize = worker_config.get("daemonize") or config.get("daemonize") + assert daemonize, "Main process must have daemonize set to true" + + # The master process doesn't support using worker_* config. + for key in worker_config: + if key == "worker_app": # But we allow worker_app + continue + assert not key.startswith("worker_"), \ + "Main process cannot use worker_* config" + else: + worker_pidfile = worker_config["worker_pid_file"] + worker_daemonize = worker_config["worker_daemonize"] + assert worker_daemonize, "In config %r: expected '%s' to be True" % ( + worker_configfile, "worker_daemonize") + worker_cache_factor = worker_config.get("synctl_cache_factor") + workers.append(Worker( + worker_app, worker_configfile, worker_pidfile, worker_cache_factor, + )) + + action = options.action + + if action == "stop" or action == "restart": + for worker in workers: + stop(worker.pidfile, worker.app) + + if start_stop_synapse: + stop(pidfile, "synapse.app.homeserver") + + # Wait for synapse to actually shutdown before starting it again + if action == "restart": + running_pids = [] + if start_stop_synapse and os.path.exists(pidfile): + running_pids.append(int(open(pidfile).read())) + for worker in workers: + if os.path.exists(worker.pidfile): + running_pids.append(int(open(worker.pidfile).read())) + if len(running_pids) > 0: + write("Waiting for process to exit before restarting...") + for running_pid in running_pids: + while pid_running(running_pid): + time.sleep(0.2) + write("All processes exited; now restarting...") + + if action == "start" or action == "restart": + if start_stop_synapse: + # Check if synapse is already running + if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())): + abort("synapse.app.homeserver already running") + start(configfile) + + for worker in workers: + if worker.cache_factor: + os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor) + + start_worker(worker.app, configfile, worker.configfile) + + if cache_factor: + os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor) + else: + os.environ.pop("SYNAPSE_CACHE_FACTOR", None) + + +if __name__ == "__main__": 
+ main() -- cgit 1.4.1 From bd469adaa9f3ae079e06d3510f61da537c5ee1cd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 25 Sep 2018 11:22:17 +0100 Subject: changelog --- changelog.d/3948.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3948.misc diff --git a/changelog.d/3948.misc b/changelog.d/3948.misc new file mode 100644 index 0000000000..a93edbd1c3 --- /dev/null +++ b/changelog.d/3948.misc @@ -0,0 +1 @@ +Fix incompatibility with python3 on alpine \ No newline at end of file -- cgit 1.4.1 From 6cf261930ac4c5f08cd3d6bf1f5a37a2889f6e7b Mon Sep 17 00:00:00 2001 From: Jérémy Farnaud Date: Tue, 25 Sep 2018 12:55:02 +0200 Subject: added "media-src: 'self'" to CSP for resources (#3578) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Synapse doesn’t allow for media resources to be played directly from Chrome. It is a problem for users on other networks (e.g. IRC) communicating with Matrix users through a gateway. The gateway sends them the raw URL for the resource when a Matrix user uploads a video and the video cannot be played directly in Chrome using that URL. Chrome argues it is not authorized to play the video because of the Content Security Policy. Chrome checks for the "media-src" policy which is missing, and defauts to the "default-src" policy which is "none". As Synapse already sends "object-src: 'self'" I thought it wouldn’t be a problem to add "media-src: 'self'" to the CSP to fix this problem. --- changelog.d/3578.bugfix | 1 + synapse/rest/media/v1/download_resource.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/3578.bugfix diff --git a/changelog.d/3578.bugfix b/changelog.d/3578.bugfix new file mode 100644 index 0000000000..9c52b6fa7e --- /dev/null +++ b/changelog.d/3578.bugfix @@ -0,0 +1 @@ +Fix problem when playing media from Chrome using direct URL (thanks @remjey!) 
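Why one directive is enough: CSP fetch directives fall back to default-src when not stated, and the header here starts from default-src 'none', so video and audio elements were refused until media-src was listed explicitly. A small sketch of the resulting header and of the fallback rule (the parsing helper is illustrative, not part of Synapse):

    csp = (
        "default-src 'none';"
        " script-src 'none';"
        " plugin-types application/pdf;"
        " style-src 'unsafe-inline';"
        " media-src 'self';"
        " object-src 'self';"
    )

    directives = dict(
        part.strip().split(" ", 1)
        for part in csp.rstrip(";").split(";")
    )

    def effective(directive):
        # per CSP, a missing fetch directive inherits from default-src
        return directives.get(directive, directives["default-src"])

    assert effective("media-src") == "'self'"   # now allowed
    assert effective("img-src") == "'none'"     # still falls back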
diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py index ca90964d1d..f911b120b1 100644 --- a/synapse/rest/media/v1/download_resource.py +++ b/synapse/rest/media/v1/download_resource.py @@ -52,6 +52,7 @@ class DownloadResource(Resource): b" script-src 'none';" b" plugin-types application/pdf;" b" style-src 'unsafe-inline';" + b" media-src 'self';" b" object-src 'self';" ) server_name, media_id, name = parse_media_id(request) -- cgit 1.4.1 From 0649306fde79c56e9668682ab7285fdc2e5483c9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 25 Sep 2018 13:29:28 +0100 Subject: Update grafana dashboard --- contrib/grafana/synapse.json | 592 ++++++++++++++++++++++++++++++------------- 1 file changed, 417 insertions(+), 175 deletions(-) diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json index c58612594a..dc3f4a1d1c 100644 --- a/contrib/grafana/synapse.json +++ b/contrib/grafana/synapse.json @@ -14,7 +14,7 @@ "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "5.2.0" + "version": "5.2.4" }, { "type": "panel", @@ -54,7 +54,7 @@ "gnetId": null, "graphTooltip": 0, "id": null, - "iteration": 1533598785368, + "iteration": 1537878047048, "links": [ { "asDropdown": true, @@ -86,7 +86,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, @@ -118,7 +118,7 @@ "steppedLine": false, "targets": [ { - "expr": "process_cpu_seconds:rate2m{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", + "expr": "rate(process_cpu_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{job}}-{{index}} ", @@ -179,7 +179,7 @@ "mode": "spectrum" }, "dataFormat": "tsbuckets", - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "gridPos": { "h": 9, "w": 12, @@ -525,7 +525,7 @@ "x": 0, "y": 25 }, - "id": 48, + "id": 50, "legend": { "avg": false, "current": false, @@ -549,8 +549,9 @@ "steppedLine": false, "targets": [ { - "expr": "rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count[$bucket_size])", + "expr": "rate(python_twisted_reactor_tick_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_twisted_reactor_tick_time_count[$bucket_size])", "format": "time_series", + "interval": "", "intervalFactor": 2, "legendFormat": "{{job}}-{{index}}", "refId": "A", @@ -560,7 +561,7 @@ "thresholds": [], "timeFrom": null, "timeShift": null, - "title": "Avg time waiting for db conn", + "title": "Avg reactor tick time", "tooltip": { "shared": true, "sort": 0, @@ -576,12 +577,11 @@ }, "yaxes": [ { - "decimals": null, "format": "s", - "label": "", + "label": null, "logBase": 1, "max": null, - "min": "0", + "min": null, "show": true }, { @@ -604,6 +604,7 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "description": "Shows the time in which the given percentage of reactor ticks completed, over the sampled timespan", "fill": 1, "gridPos": { "h": 7, @@ -611,7 +612,7 @@ "x": 12, "y": 25 }, - "id": 49, + "id": 105, "legend": { "avg": false, "current": false, @@ -629,33 +630,47 @@ "pointradius": 5, "points": false, "renderer": "flot", - "seriesOverrides": [ - { - "alias": "/^up/", - "legend": false, - "yaxis": 2 - } - ], + "seriesOverrides": [], "spaceLength": 10, "stack": false, 
"steppedLine": false, "targets": [ { - "expr": "scrape_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", + "expr": "histogram_quantile(0.99, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))", "format": "time_series", "interval": "", "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}}", + "legendFormat": "{{job}}-{{index}} 99%", "refId": "A", "step": 20 + }, + { + "expr": "histogram_quantile(0.95, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} 95%", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.90, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} 90%", + "refId": "C" + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 1, + "refId": "D" } ], "thresholds": [], "timeFrom": null, "timeShift": null, - "title": "Prometheus scrape time", + "title": "Reactor tick quantiles", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -673,16 +688,15 @@ "label": null, "logBase": 1, "max": null, - "min": "0", + "min": null, "show": true }, { - "decimals": 0, - "format": "none", - "label": "", + "format": "short", + "label": null, "logBase": 1, - "max": "0", - "min": "-1", + "max": null, + "min": null, "show": false } ], @@ -697,14 +711,14 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", - "fill": 1, + "fill": 0, "gridPos": { "h": 7, "w": 12, "x": 0, "y": 32 }, - "id": 50, + "id": 53, "legend": { "avg": false, "current": false, @@ -728,19 +742,17 @@ "steppedLine": false, "targets": [ { - "expr": "rate(python_twisted_reactor_tick_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_twisted_reactor_tick_time_count[$bucket_size])", + "expr": "min_over_time(up{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", - "interval": "", "intervalFactor": 2, "legendFormat": "{{job}}-{{index}}", - "refId": "A", - "step": 20 + "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeShift": null, - "title": "Avg reactor tick time", + "title": "Up", "tooltip": { "shared": true, "sort": 0, @@ -756,7 +768,7 @@ }, "yaxes": [ { - "format": "s", + "format": "short", "label": null, "logBase": 1, "max": null, @@ -769,7 +781,7 @@ "logBase": 1, "max": null, "min": null, - "show": false + "show": true } ], "yaxis": { @@ -783,26 +795,19 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", - "editable": true, - "error": false, "fill": 1, - "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 12, "y": 32 }, - "id": 5, + "id": 49, "legend": { - "alignAsTable": false, "avg": false, "current": false, - "hideEmpty": false, - "hideZero": false, "max": false, "min": false, - "rightSide": false, "show": true, "total": false, "values": false @@ -817,10 +822,9 @@ "renderer": "flot", "seriesOverrides": [ { - "alias": "/user/" - }, - { - "alias": "/system/" + "alias": "/^up/", + "legend": false, + "yaxis": 2 } ], "spaceLength": 10, @@ -828,44 +832,19 @@ "steppedLine": false, "targets": [ { - "expr": "rate(process_cpu_system_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 1, - 
"legendFormat": "{{job}}-{{index}} system ", - "metric": "", - "refId": "B", - "step": 20 - }, - { - "expr": "rate(process_cpu_user_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "scrape_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", - "hide": false, "interval": "", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} user", + "intervalFactor": 2, + "legendFormat": "{{job}}-{{index}}", "refId": "A", "step": 20 } ], - "thresholds": [ - { - "colorMode": "custom", - "line": true, - "lineColor": "rgba(216, 200, 27, 0.27)", - "op": "gt", - "value": 0.5 - }, - { - "colorMode": "custom", - "line": true, - "lineColor": "rgba(234, 112, 112, 0.22)", - "op": "gt", - "value": 0.8 - } - ], + "thresholds": [], "timeFrom": null, "timeShift": null, - "title": "CPU", + "title": "Prometheus scrape time", "tooltip": { "shared": true, "sort": 0, @@ -881,20 +860,21 @@ }, "yaxes": [ { - "decimals": null, - "format": "percentunit", - "label": "", + "format": "s", + "label": null, "logBase": 1, - "max": "1.2", - "min": 0, + "max": null, + "min": "0", "show": true }, { - "format": "short", + "decimals": 0, + "format": "none", + "label": "", "logBase": 1, - "max": null, - "min": null, - "show": true + "max": "0", + "min": "-1", + "show": false } ], "yaxis": { @@ -907,20 +887,27 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", - "fill": 0, + "datasource": "$datasource", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 0, "y": 39 }, - "id": 53, + "id": 5, "legend": { + "alignAsTable": false, "avg": false, "current": false, + "hideEmpty": false, + "hideZero": false, "max": false, "min": false, + "rightSide": false, "show": true, "total": false, "values": false @@ -933,23 +920,57 @@ "pointradius": 5, "points": false, "renderer": "flot", - "seriesOverrides": [], + "seriesOverrides": [ + { + "alias": "/user/" + }, + { + "alias": "/system/" + } + ], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "min_over_time(up{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "rate(process_cpu_system_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}}", - "refId": "A" + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} system ", + "metric": "", + "refId": "B", + "step": 20 + }, + { + "expr": "rate(process_cpu_user_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} user", + "refId": "A", + "step": 20 + } + ], + "thresholds": [ + { + "colorMode": "custom", + "line": true, + "lineColor": "rgba(216, 200, 27, 0.27)", + "op": "gt", + "value": 0.5 + }, + { + "colorMode": "custom", + "line": true, + "lineColor": "rgba(234, 112, 112, 0.22)", + "op": "gt", + "value": 0.8 } ], - "thresholds": [], "timeFrom": null, "timeShift": null, - "title": "Up", + "title": "CPU", "tooltip": { "shared": true, "sort": 0, @@ -965,16 +986,16 @@ }, "yaxes": [ { - "format": "short", - "label": null, + "decimals": null, + "format": "percentunit", + "label": "", "logBase": 1, - "max": null, - "min": null, + "max": "1.2", + "min": 0, "show": true }, { "format": "short", - "label": null, "logBase": 1, "max": null, "min": 
null, @@ -1013,7 +1034,7 @@ "h": 7, "w": 12, "x": 0, - "y": 49 + "y": 47 }, "id": 40, "legend": { @@ -1098,7 +1119,7 @@ "h": 7, "w": 12, "x": 12, - "y": 49 + "y": 47 }, "id": 46, "legend": { @@ -1187,7 +1208,7 @@ "h": 7, "w": 12, "x": 0, - "y": 56 + "y": 54 }, "id": 44, "legend": { @@ -1276,7 +1297,7 @@ "h": 7, "w": 12, "x": 12, - "y": 56 + "y": 54 }, "id": 45, "legend": { @@ -1383,7 +1404,7 @@ "h": 8, "w": 12, "x": 0, - "y": 48 + "y": 62 }, "id": 4, "legend": { @@ -1490,7 +1511,7 @@ "h": 8, "w": 12, "x": 12, - "y": 48 + "y": 62 }, "id": 32, "legend": { @@ -1578,7 +1599,7 @@ "h": 8, "w": 12, "x": 0, - "y": 56 + "y": 70 }, "id": 23, "legend": { @@ -1688,7 +1709,7 @@ "h": 8, "w": 12, "x": 12, - "y": 56 + "y": 70 }, "id": 52, "legend": { @@ -1795,7 +1816,7 @@ "h": 8, "w": 12, "x": 0, - "y": 64 + "y": 78 }, "id": 7, "legend": { @@ -1886,7 +1907,7 @@ "h": 8, "w": 12, "x": 12, - "y": 64 + "y": 78 }, "id": 47, "legend": { @@ -1969,13 +1990,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 72 + "y": 86 }, "id": 103, "legend": { @@ -2069,13 +2090,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 23 + "y": 49 }, "id": 99, "legend": { @@ -2154,13 +2175,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, "w": 12, "x": 12, - "y": 23 + "y": 49 }, "id": 101, "legend": { @@ -2186,17 +2207,24 @@ "steppedLine": false, "targets": [ { - "expr": "rate(synapse_background_process_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "rate(synapse_background_process_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_background_process_db_sched_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", + "hide": false, "intervalFactor": 1, "legendFormat": "{{job}}-{{index}} {{name}}", "refId": "A" + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 1, + "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeShift": null, - "title": "DB usage by background jobs", + "title": "DB usage by background jobs (including scheduling time)", "tooltip": { "shared": true, "sort": 0, @@ -2252,13 +2280,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 25 + "y": 64 }, "id": 79, "legend": { @@ -2336,13 +2364,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, "w": 12, "x": 12, - "y": 25 + "y": 64 }, "id": 83, "legend": { @@ -2447,7 +2475,7 @@ "h": 7, "w": 12, "x": 0, - "y": 23 + "y": 65 }, "id": 51, "legend": { @@ -2545,6 +2573,194 @@ }, "id": 58, "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 24 + }, + "id": 48, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": 
"null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count[$bucket_size])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{job}}-{{index}}", + "refId": "A", + "step": 20 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Avg time waiting for db conn", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "s", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Shows the time in which the given percentage of database queries were scheduled, over the sampled timespan", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 24 + }, + "id": 104, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "{{job}} {{index}} 99%", + "refId": "A", + "step": 20 + }, + { + "expr": "histogram_quantile(0.95, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}} {{index}} 95%", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.90, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}} {{index}} 90%", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Db scheduling time quantiles", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "s", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, { "aliasColors": {}, "bars": false, @@ -2559,7 +2775,7 @@ "h": 7, "w": 12, "x": 0, - "y": 25 + "y": 31 }, "id": 10, "legend": { @@ -2648,7 +2864,7 @@ "h": 7, "w": 12, "x": 12, - "y": 25 + "y": 31 }, "id": 11, "legend": { @@ -2672,11 +2888,11 @@ 
"renderer": "flot", "seriesOverrides": [], "spaceLength": 10, - "stack": false, + "stack": true, "steppedLine": true, "targets": [ { - "expr": "topk(5, rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", + "expr": "rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "instant": false, "interval": "", @@ -2753,7 +2969,7 @@ "h": 13, "w": 12, "x": 0, - "y": 17 + "y": 60 }, "id": 12, "legend": { @@ -2841,7 +3057,7 @@ "h": 13, "w": 12, "x": 12, - "y": 17 + "y": 60 }, "id": 26, "legend": { @@ -2929,7 +3145,7 @@ "h": 13, "w": 12, "x": 0, - "y": 30 + "y": 73 }, "id": 13, "legend": { @@ -3017,7 +3233,7 @@ "h": 13, "w": 12, "x": 12, - "y": 30 + "y": 73 }, "id": 27, "legend": { @@ -3105,7 +3321,7 @@ "h": 13, "w": 12, "x": 0, - "y": 43 + "y": 86 }, "id": 28, "legend": { @@ -3192,7 +3408,7 @@ "h": 13, "w": 12, "x": 12, - "y": 43 + "y": 86 }, "id": 25, "legend": { @@ -3295,7 +3511,7 @@ "h": 10, "w": 12, "x": 0, - "y": 55 + "y": 68 }, "id": 1, "legend": { @@ -3387,7 +3603,7 @@ "h": 10, "w": 12, "x": 12, - "y": 55 + "y": 68 }, "id": 8, "legend": { @@ -3477,7 +3693,7 @@ "h": 10, "w": 12, "x": 0, - "y": 65 + "y": 78 }, "id": 38, "legend": { @@ -3563,7 +3779,7 @@ "h": 10, "w": 12, "x": 12, - "y": 65 + "y": 78 }, "id": 39, "legend": { @@ -3643,13 +3859,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 75 + "y": 88 }, "id": 65, "legend": { @@ -3745,13 +3961,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 90 + "y": 27 }, "id": 91, "legend": { @@ -3841,7 +4057,7 @@ "h": 9, "w": 12, "x": 12, - "y": 90 + "y": 27 }, "id": 21, "legend": { @@ -3920,13 +4136,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 99 + "y": 36 }, "id": 89, "legend": { @@ -4006,13 +4222,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, "w": 12, "x": 12, - "y": 99 + "y": 36 }, "id": 93, "legend": { @@ -4027,7 +4243,7 @@ "lines": true, "linewidth": 1, "links": [], - "nullPointMode": "null", + "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, @@ -4090,13 +4306,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 108 + "y": 45 }, "id": 95, "legend": { @@ -4189,7 +4405,7 @@ "h": 9, "w": 12, "x": 12, - "y": 108 + "y": 45 }, "heatmap": {}, "highlightCards": true, @@ -4251,13 +4467,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 7, "w": 12, "x": 0, - "y": 19 + "y": 97 }, "id": 2, "legend": { @@ -4357,20 +4573,24 @@ "min": null, "show": true } - ] + ], + "yaxis": { + "align": false, + "alignLevel": null + } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 7, "w": 12, "x": 12, - "y": 19 + "y": 97 }, "id": 41, "legend": { @@ -4439,20 
+4659,24 @@ "min": null, "show": true } - ] + ], + "yaxis": { + "align": false, + "alignLevel": null + } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 7, "w": 12, "x": 0, - "y": 26 + "y": 104 }, "id": 42, "legend": { @@ -4520,20 +4744,24 @@ "min": null, "show": true } - ] + ], + "yaxis": { + "align": false, + "alignLevel": null + } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 7, "w": 12, "x": 12, - "y": 26 + "y": 104 }, "id": 43, "legend": { @@ -4601,7 +4829,11 @@ "min": null, "show": true } - ] + ], + "yaxis": { + "align": false, + "alignLevel": null + } } ], "repeat": null, @@ -4623,7 +4855,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, @@ -4644,7 +4876,7 @@ "lines": true, "linewidth": 1, "links": [], - "nullPointMode": "null", + "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, @@ -4708,7 +4940,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, @@ -4729,7 +4961,7 @@ "lines": true, "linewidth": 1, "links": [], - "nullPointMode": "null", + "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, @@ -4856,9 +5088,19 @@ "selected": false, "text": "5m", "value": "5m" + }, + { + "selected": false, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "15m", + "value": "15m" } ], - "query": "30s,1m,2m,5m", + "query": "30s,1m,2m,5m,10m,15m", "refresh": 2, "type": "interval" }, @@ -4872,7 +5114,7 @@ "multi": false, "name": "instance", "options": [], - "query": "label_values(process_cpu_user_seconds_total{job=~\"synapse.*\"}, instance)", + "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, instance)", "refresh": 2, "regex": "", "sort": 0, @@ -4895,7 +5137,7 @@ "multiFormat": "regex values", "name": "job", "options": [], - "query": "label_values(process_cpu_user_seconds_total{job=~\"synapse.*\"}, job)", + "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, job)", "refresh": 2, "refresh_on_load": false, "regex": "", @@ -4919,7 +5161,7 @@ "multiFormat": "regex values", "name": "index", "options": [], - "query": "label_values(process_cpu_user_seconds_total{job=~\"synapse.*\"}, index)", + "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, index)", "refresh": 2, "refresh_on_load": false, "regex": "", @@ -4965,5 +5207,5 @@ "timezone": "", "title": "Synapse", "uid": "000000012", - "version": 127 + "version": 3 } \ No newline at end of file -- cgit 1.4.1 From e7fa032126855f783b9246e2f03d8b62b58f5521 Mon Sep 17 00:00:00 2001 From: Jan Christian Grünhage Date: Tue, 25 Sep 2018 14:33:19 +0200 Subject: Update .dockerignore --- .dockerignore | 1 - 1 file changed, 1 deletion(-) diff --git a/.dockerignore b/.dockerignore index 81e8f6d50a..0180602e56 100644 --- a/.dockerignore +++ b/.dockerignore @@ -2,7 +2,6 @@ Dockerfile .travis.yml .gitignore demo/etc -synctl tox.ini .git/* .tox/* -- cgit 1.4.1 From df55a943cadaba60b9171f2e70f62dc4b38fbd78 Mon Sep 17 00:00:00 2001 From: Jan Christian Grünhage Date: Tue, 25 Sep 2018 14:33:38 +0200 Subject: Update Dockerfile --- docker/Dockerfile | 1 - 1 file changed, 1 deletion(-) diff --git 
a/docker/Dockerfile b/docker/Dockerfile index ad0ae0b734..1d00defc2d 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -13,7 +13,6 @@ RUN apk add --no-cache --virtual .build_deps \ postgresql-dev \ zlib-dev \ && cd /synapse \ - && sed -i 's/\["synctl"\] + //' setup.py \ && apk add --no-cache --virtual .runtime_deps \ libffi \ libjpeg-turbo \ -- cgit 1.4.1 From 3baf6e1667f0c257f288a28bd542a93c2445fd30 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 21 Sep 2018 16:25:42 +0100 Subject: Fix ExpiringCache.__len__ to be accurate It used to try and produce an estimate, which was sometimes negative. This caused metrics to be sad, so lets always just calculate it from scratch. (This appears to have been a longstanding bug, but one which has been made more of a problem by #3932 and #3933). (This was originally done by Erik as part of #3933. I'm cherry-picking it because really it's a fix in its own right) --- synapse/util/caches/expiringcache.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 921a9c5b29..9af4ec4aa8 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -16,6 +16,8 @@ import logging from collections import OrderedDict +from six import itervalues + from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.caches import register_cache @@ -54,8 +56,6 @@ class ExpiringCache(object): self.iterable = iterable - self._size_estimate = 0 - self.metrics = register_cache("expiring", cache_name, self) if not self._expiry_ms: @@ -74,16 +74,11 @@ class ExpiringCache(object): now = self._clock.time_msec() self._cache[key] = _CacheEntry(now, value) - if self.iterable: - self._size_estimate += len(value) - # Evict if there are now too many items while self._max_len and len(self) > self._max_len: _key, value = self._cache.popitem(last=False) if self.iterable: - removed_len = len(value.value) - self.metrics.inc_evictions(removed_len) - self._size_estimate -= removed_len + self.metrics.inc_evictions(len(value.value)) else: self.metrics.inc_evictions() @@ -134,7 +129,9 @@ class ExpiringCache(object): for k in keys_to_delete: value = self._cache.pop(k) if self.iterable: - self._size_estimate -= len(value.value) + self.metrics.inc_evictions(len(value.value)) + else: + self.metrics.inc_evictions() logger.debug( "[%s] _prune_cache before: %d, after len: %d", @@ -143,7 +140,7 @@ class ExpiringCache(object): def __len__(self): if self.iterable: - return self._size_estimate + return sum(len(entry.value) for entry in itervalues(self._cache)) else: return len(self._cache) -- cgit 1.4.1 From 8ac9fa73752b35e0d4feaeef33c5dcca018fca07 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 12:35:10 +0100 Subject: changelog --- changelog.d/3956.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3956.bugfix diff --git a/changelog.d/3956.bugfix b/changelog.d/3956.bugfix new file mode 100644 index 0000000000..b0828c9fc6 --- /dev/null +++ b/changelog.d/3956.bugfix @@ -0,0 +1 @@ +Fix exceptions from metrics handler \ No newline at end of file -- cgit 1.4.1 From 7ee94fc1baf6c4e2970232383d76277d8378712b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 12:40:20 +0100 Subject: Log which cache is throwing exceptions --- synapse/util/caches/__init__.py | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git 
a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 7b065b195e..f37d5bec08 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging import os import six @@ -20,6 +21,8 @@ from six.moves import intern from prometheus_client.core import REGISTRY, Gauge, GaugeMetricFamily +logger = logging.getLogger(__name__) + CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.5)) @@ -76,16 +79,20 @@ def register_cache(cache_type, cache_name, cache): return [] def collect(self): - if cache_type == "response_cache": - response_cache_size.labels(cache_name).set(len(cache)) - response_cache_hits.labels(cache_name).set(self.hits) - response_cache_evicted.labels(cache_name).set(self.evicted_size) - response_cache_total.labels(cache_name).set(self.hits + self.misses) - else: - cache_size.labels(cache_name).set(len(cache)) - cache_hits.labels(cache_name).set(self.hits) - cache_evicted.labels(cache_name).set(self.evicted_size) - cache_total.labels(cache_name).set(self.hits + self.misses) + try: + if cache_type == "response_cache": + response_cache_size.labels(cache_name).set(len(cache)) + response_cache_hits.labels(cache_name).set(self.hits) + response_cache_evicted.labels(cache_name).set(self.evicted_size) + response_cache_total.labels(cache_name).set(self.hits + self.misses) + else: + cache_size.labels(cache_name).set(len(cache)) + cache_hits.labels(cache_name).set(self.hits) + cache_evicted.labels(cache_name).set(self.evicted_size) + cache_total.labels(cache_name).set(self.hits + self.misses) + except Exception as e: + logger.warn("Error calculating metrics for %s: %s", cache_name, e) + raise yield GaugeMetricFamily("__unused", "") -- cgit 1.4.1 From 8834396b8ac96f76263d80dea9b559e2597215a6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 21 Feb 2018 21:09:33 +0000 Subject: Actually set cache factors in workers --- synapse/app/synctl.py | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/synapse/app/synctl.py b/synapse/app/synctl.py index d658f967ba..356e5cb6a7 100755 --- a/synapse/app/synctl.py +++ b/synapse/app/synctl.py @@ -111,7 +111,7 @@ def stop(pidfile, app): Worker = collections.namedtuple("Worker", [ - "app", "configfile", "pidfile", "cache_factor" + "app", "configfile", "pidfile", "cache_factor", "cache_factors", ]) @@ -218,6 +218,10 @@ def main(): or pidfile ) worker_cache_factor = worker_config.get("synctl_cache_factor") or cache_factor + worker_cache_factors = ( + worker_config.get("synctl_cache_factors") + or cache_factors + ) daemonize = worker_config.get("daemonize") or config.get("daemonize") assert daemonize, "Main process must have daemonize set to true" @@ -233,8 +237,10 @@ def main(): assert worker_daemonize, "In config %r: expected '%s' to be True" % ( worker_configfile, "worker_daemonize") worker_cache_factor = worker_config.get("synctl_cache_factor") + worker_cache_factors = worker_config.get("synctl_cache_factors", {}) workers.append(Worker( worker_app, worker_configfile, worker_pidfile, worker_cache_factor, + worker_cache_factors, )) action = options.action @@ -269,15 +275,19 @@ def main(): start(configfile) for worker in workers: + env = os.environ.copy() + if worker.cache_factor: os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor) + for cache_name, factor in worker.cache_factors.iteritems(): + os.environ["SYNAPSE_CACHE_FACTOR_" + 
cache_name.upper()] = str(factor) + start_worker(worker.app, configfile, worker.configfile) - if cache_factor: - os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor) - else: - os.environ.pop("SYNAPSE_CACHE_FACTOR", None) + # Reset env back to the original + os.environ.clear() + os.environ.update(env) if __name__ == "__main__": -- cgit 1.4.1 From 8afddf7afec8c6ee790a10e24fe817d663bfb4ee Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 25 Sep 2018 16:34:40 +0100 Subject: Fix error handling for missing auth_event When we were authorizing an event, if there was no `m.room.create` in its auth_events, we would raise a SynapseError with a cryptic message, which then meant that we would bail out of processing any incoming events, rather than storing a rejection for the faulty event and moving on. We should treat the absent event the same as any other auth failure, by raising an AuthError, so that the event is marked as rejected. --- synapse/event_auth.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 6baeccca38..02fa46ef7f 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -98,9 +98,9 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True): creation_event = auth_events.get((EventTypes.Create, ""), None) if not creation_event: - raise SynapseError( + raise AuthError( 403, - "Room %r does not exist" % (event.room_id,) + "No create event in auth events", ) creating_domain = get_domain_from_id(event.room_id) -- cgit 1.4.1 From a5e70b31a12c3c75d5e71a98f8ad60621086d47b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 14:41:14 +0100 Subject: changelog --- changelog.d/3960.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3960.bugfix diff --git a/changelog.d/3960.bugfix b/changelog.d/3960.bugfix new file mode 100644 index 0000000000..a01dc60df6 --- /dev/null +++ b/changelog.d/3960.bugfix @@ -0,0 +1 @@ +Fix error message for events with m.room.create missing from auth_events \ No newline at end of file -- cgit 1.4.1 From d4e0861ff94a809ad36c6b951c1e903457c44def Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 27 Sep 2018 00:23:21 +1000 Subject: Reduce the load on our CI (#3957) * changelog * reduce circleci config * plus a handy script * fix regex --- .circleci/config.yml | 28 ++++++++++++++++++++-------- changelog.d/3957.misc | 1 + scripts-dev/next_github_number.sh | 9 +++++++++ 3 files changed, 30 insertions(+), 8 deletions(-) create mode 100644 changelog.d/3957.misc create mode 100755 scripts-dev/next_github_number.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index 605430fb3f..521aca22ef 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -99,23 +99,35 @@ workflows: version: 2 build: jobs: - - sytestpy2 - - sytestpy2postgres - - sytestpy3 - - sytestpy3postgres + - sytestpy2: + filters: + branches: + only: /develop|master|release-.*/ + - sytestpy2postgres: + filters: + branches: + only: /develop|master|release-.*/ + - sytestpy3: + filters: + branches: + only: /develop|master|release-.*/ + - sytestpy3postgres: + filters: + branches: + only: /develop|master|release-.*/ - sytestpy2merged: filters: branches: - ignore: /develop|master/ + ignore: /develop|master|release-.*/ - sytestpy2postgresmerged: filters: branches: - ignore: /develop|master/ + ignore: /develop|master|release-.*/ - sytestpy3merged: filters: branches: - ignore: /develop|master/ + ignore: /develop|master|release-.*/ - 
sytestpy3postgresmerged: filters: branches: - ignore: /develop|master/ + ignore: /develop|master|release-.*/ diff --git a/changelog.d/3957.misc b/changelog.d/3957.misc new file mode 100644 index 0000000000..69d647f119 --- /dev/null +++ b/changelog.d/3957.misc @@ -0,0 +1 @@ +CircleCI now only runs merged jobs on PRs, and commit jobs on develop, master, and release branches. diff --git a/scripts-dev/next_github_number.sh b/scripts-dev/next_github_number.sh new file mode 100755 index 0000000000..376280025a --- /dev/null +++ b/scripts-dev/next_github_number.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +set -e + +# Fetch the current GitHub issue number, add one to it -- presto! The likely +# next PR number. +CURRENT_NUMBER=`curl -s "https://api.github.com/repos/matrix-org/synapse/issues?state=all&per_page=1" | jq -r ".[0].number"` +CURRENT_NUMBER=$((CURRENT_NUMBER+1)) +echo $CURRENT_NUMBER \ No newline at end of file -- cgit 1.4.1 From 28781b65e7cb8b0d1fc409d0d78253662b49fe06 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 26 Sep 2018 16:16:41 +0100 Subject: fix #3854 --- synapse/storage/monthly_active_users.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index 59580949f1..0fe8c8e24c 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -172,6 +172,10 @@ class MonthlyActiveUsersStore(SQLBaseStore): Deferred[bool]: True if a new entry was created, False if an existing one was updated. """ + # Am consciously deciding to lock the table on the basis that it ought + # never be a big table and alternative approaches (batching multiple + # upserts into a single txn) introduced a lot of extra complexity. + # See https://github.com/matrix-org/synapse/issues/3854 for more is_insert = yield self._simple_upsert( desc="upsert_monthly_active_user", table="monthly_active_users", @@ -181,7 +185,6 @@ values={ "timestamp": int(self._clock.time_msec()), }, - lock=False, ) if is_insert: self.user_last_seen_monthly_active.invalidate((user_id,)) -- cgit 1.4.1 From d514608b5cc80cfe1f93a9953eb8264162c4a47b Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 26 Sep 2018 16:19:53 +0100 Subject: towncrier --- changelog.d/3961.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3961.bugfix diff --git a/changelog.d/3961.bugfix b/changelog.d/3961.bugfix new file mode 100644 index 0000000000..e46b5834aa --- /dev/null +++ b/changelog.d/3961.bugfix @@ -0,0 +1 @@ +Fix errors due to concurrent monthly_active_user upserts -- cgit 1.4.1 From 607eec0456dde3b7e5b33c3e9297af234659caae Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 25 Sep 2018 16:53:55 +0100 Subject: fix docstring for FederationClient.get_state_for_room trivial fixes for docstring --- synapse/federation/federation_client.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 5a92428f56..8bf1ad0c1f 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -289,8 +289,7 @@ class FederationClient(FederationBase): @defer.inlineCallbacks @log_function def get_state_for_room(self, destination, room_id, event_id): - """Requests all of the `current` state PDUs for a given room from - a remote home server. + """Requests all of the room state at a given event from a remote home server. 
Args: destination (str): The remote homeserver to query for the state. @@ -298,9 +297,10 @@ class FederationClient(FederationBase): event_id (str): The id of the event we want the state at. Returns: - Deferred: Results in a list of PDUs. + Deferred[Tuple[List[EventBase], List[EventBase]]]: + A list of events in the state, and a list of events in the auth chain + for the given event. """ - try: # First we try and ask for just the IDs, as thats far quicker if # we have most of the state and auth_chain already. -- cgit 1.4.1 From ab59f3d8dab9b0a1c25c8f136ced9f28861a86ec Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 16:51:28 +0100 Subject: changelog --- changelog.d/3963.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3963.misc diff --git a/changelog.d/3963.misc b/changelog.d/3963.misc new file mode 100644 index 0000000000..f1e0eaf18e --- /dev/null +++ b/changelog.d/3963.misc @@ -0,0 +1 @@ +fix docstring for FederationClient.get_state_for_room -- cgit 1.4.1 From 9453c65948bce69f259c1ba1005565d86b1d8730 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 25 Sep 2018 17:38:54 +0100 Subject: remove spurious federation checks on localhost There's really no point in checking for destinations called "localhost" because there is nothing stopping people creating other DNS entries which point to 127.0.0.1. The right fix for this is https://github.com/matrix-org/synapse/issues/3953. Blocking localhost, on the other hand, means that you get a surprise when trying to connect a test server on localhost to an existing server (with a 'normal' server_name). --- synapse/federation/transaction_queue.py | 37 ++++++--------------------------- 1 file changed, 6 insertions(+), 31 deletions(-) diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 8cbf8c4f7f..ae47aaae0b 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -137,26 +137,6 @@ class TransactionQueue(object): self._processing_pending_presence = False - def can_send_to(self, destination): - """Can we send messages to the given server? - - We can't send messages to ourselves. If we are running on localhost - then we can only federation with other servers running on localhost. - Otherwise we only federate with servers on a public domain. - - Args: - destination(str): The server we are possibly trying to send to. - Returns: - bool: True if we can send to the server. - """ - - if destination == self.server_name: - return False - if self.server_name.startswith("localhost"): - return destination.startswith("localhost") - else: - return not destination.startswith("localhost") - def notify_new_events(self, current_id): """This gets called when we have some new events we might want to send out to other servers. 
@@ -279,10 +259,7 @@ class TransactionQueue(object): self._order += 1 destinations = set(destinations) - destinations = set( - dest for dest in destinations if self.can_send_to(dest) - ) - + destinations.discard(self.server_name) logger.debug("Sending to: %s", str(destinations)) if not destinations: @@ -358,7 +335,7 @@ for destinations, states in hosts_and_states: for destination in destinations: - if not self.can_send_to(destination): + if destination == self.server_name: continue self.pending_presence_by_dest.setdefault( @@ -377,7 +354,8 @@ content=content, ) - if not self.can_send_to(destination): + if destination == self.server_name: + logger.info("Not sending EDU to ourselves") return sent_edus_counter.inc() @@ -392,11 +370,9 @@ self._attempt_new_transaction(destination) def send_device_messages(self, destination): - if destination == self.server_name or destination == "localhost": - return - - if not self.can_send_to(destination): - return + if destination == self.server_name: + logger.info("Not sending device update to ourselves") + return self._attempt_new_transaction(destination) -- cgit 1.4.1 From 0c4a99ea2d6046c815c8040d8d060795ab787b5f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 16:54:54 +0100 Subject: changelog --- changelog.d/3964.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3964.feature diff --git a/changelog.d/3964.feature b/changelog.d/3964.feature new file mode 100644 index 0000000000..599222eb58 --- /dev/null +++ b/changelog.d/3964.feature @@ -0,0 +1 @@ +Remove spurious check which made 'localhost' servers not work -- cgit 1.4.1 From a87d419a85e1c966da5bb4fc08f3309eb78ecb7a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 07:48:35 +0100 Subject: Run notify_app_services as a bg process This ensures that its resource usage metrics get recorded somewhere rather than getting lost. (It also fixes an error when called from a nested logging context which completes before the bg process) --- synapse/notifier.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/synapse/notifier.py b/synapse/notifier.py index f1d92c1395..340b16ce25 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -24,9 +24,10 @@ from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError from synapse.handlers.presence import format_user_presence_state from synapse.metrics import LaterGauge +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import StreamToken from synapse.util.async_helpers import ObservableDeferred, timeout_deferred -from synapse.util.logcontext import PreserveLoggingContext, run_in_background +from synapse.util.logcontext import PreserveLoggingContext from synapse.util.logutils import log_function from synapse.util.metrics import Measure from synapse.visibility import filter_events_for_client @@ -248,7 +249,10 @@ class Notifier(object): def _on_new_room_event(self, event, room_stream_id, extra_users=[]): """Notify any user streams that are interested in this room event""" # poke any interested application service. 
- run_in_background(self._notify_app_services, room_stream_id) + run_as_background_process( + "notify_app_services", + self._notify_app_services, room_stream_id, + ) if self.federation_sender: self.federation_sender.notify_new_events(room_stream_id) -- cgit 1.4.1 From 3c37c7e45b1befc6728fc0b26724bdea2e0845bd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 17:01:30 +0100 Subject: changelog --- changelog.d/3965.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3965.misc diff --git a/changelog.d/3965.misc b/changelog.d/3965.misc new file mode 100644 index 0000000000..e7e4a9c5a8 --- /dev/null +++ b/changelog.d/3965.misc @@ -0,0 +1 @@ +Run notify_app_services as a bg process -- cgit 1.4.1 From e70b4ce06920102e2460dfb65bc357e2d7e8b794 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 07:56:06 +0100 Subject: Logging improvements Some logging tweaks to help with debugging incoming federation transactions --- changelog.d/3966.misc | 1 + synapse/handlers/federation.py | 4 ++++ synapse/state/v1.py | 14 ++++++++++++-- 3 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 changelog.d/3966.misc diff --git a/changelog.d/3966.misc b/changelog.d/3966.misc new file mode 100644 index 0000000000..1e3c8e1706 --- /dev/null +++ b/changelog.d/3966.misc @@ -0,0 +1 @@ +Improve the logging when handling a federation transaction \ No newline at end of file diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 2ccdc3bfa7..a70ae8c830 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -572,6 +572,10 @@ class FederationHandler(BaseHandler): }) seen_ids.add(e.event_id) + logger.info( + "[%s %s] persisting newly-received auth/state events %s", + room_id, event_id, [e["event"].event_id for e in event_infos] + ) yield self._handle_new_events(origin, event_infos) try: diff --git a/synapse/state/v1.py b/synapse/state/v1.py index c95477d318..7a7157b352 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -65,10 +65,15 @@ def resolve_events_with_factory(state_sets, event_map, state_map_factory): for event_ids in itervalues(conflicted_state) for event_id in event_ids ) + needed_event_count = len(needed_events) if event_map is not None: needed_events -= set(iterkeys(event_map)) - logger.info("Asking for %d conflicted events", len(needed_events)) + logger.info( + "Asking for %d/%d conflicted events", + len(needed_events), + needed_event_count, + ) # dict[str, FrozenEvent]: a map from state event id to event. 
Only includes # the state events which are in conflict (and those in event_map) @@ -85,11 +90,16 @@ def resolve_events_with_factory(state_sets, event_map, state_map_factory): ) new_needed_events = set(itervalues(auth_events)) + new_needed_event_count = len(new_needed_events) new_needed_events -= needed_events if event_map is not None: new_needed_events -= set(iterkeys(event_map)) - logger.info("Asking for %d auth events", len(new_needed_events)) + logger.info( + "Asking for %d/%d auth events", + len(new_needed_events), + new_needed_event_count, + ) state_map_new = yield state_map_factory(new_needed_events) state_map.update(state_map_new) -- cgit 1.4.1 From 219606a6eda4aac01bdb24f69ec0d418689e653e Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 26 Sep 2018 13:26:27 -0600 Subject: Fix exception documentation in matrixfederationclient.py --- synapse/http/matrixfederationclient.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 14b12cd1c4..1a79f6305b 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -230,7 +230,7 @@ class MatrixFederationHttpClient(object): Returns: Deferred: resolves with the http response object on success. - Fails with ``HTTPRequestException``: if we get an HTTP response + Fails with ``HttpResponseException``: if we get an HTTP response code >= 300. Fails with ``NotRetryingDestination`` if we are not yet ready @@ -478,7 +478,7 @@ class MatrixFederationHttpClient(object): Deferred: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. - Fails with ``HTTPRequestException`` if we get an HTTP response + Fails with ``HttpResponseException`` if we get an HTTP response code >= 300. Fails with ``NotRetryingDestination`` if we are not yet ready @@ -532,7 +532,7 @@ class MatrixFederationHttpClient(object): Deferred: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. - Fails with ``HTTPRequestException`` if we get an HTTP response + Fails with ``HttpResponseException`` if we get an HTTP response code >= 300. Fails with ``NotRetryingDestination`` if we are not yet ready @@ -587,7 +587,7 @@ class MatrixFederationHttpClient(object): Deferred: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. - Fails with ``HTTPRequestException`` if we get an HTTP response + Fails with ``HttpResponseException`` if we get an HTTP response code >= 300. Fails with ``NotRetryingDestination`` if we are not yet ready @@ -638,7 +638,7 @@ class MatrixFederationHttpClient(object): Deferred: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. - Fails with ``HTTPRequestException`` if we get an HTTP response + Fails with ``HttpResponseException`` if we get an HTTP response code >= 300. Fails with ``NotRetryingDestination`` if we are not yet ready @@ -682,7 +682,7 @@ class MatrixFederationHttpClient(object): Deferred: resolves with an (int,dict) tuple of the file length and a dict of the response headers. 
- Fails with ``HTTPRequestException`` if we get an HTTP response code + Fails with ``HttpResponseException`` if we get an HTTP response code >= 300 Fails with ``NotRetryingDestination`` if we are not yet ready -- cgit 1.4.1 From b4c3bc17343833205d3bb00009074734edce667a Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 26 Sep 2018 13:26:45 -0600 Subject: Handle HttpResponseException more safely for federated groups --- synapse/handlers/groups_local.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 53e5e2648b..e36f22f128 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -20,7 +20,7 @@ from six import iteritems from twisted.internet import defer -from synapse.api.errors import SynapseError +from synapse.api.errors import (SynapseError, HttpResponseException) from synapse.types import get_domain_from_id logger = logging.getLogger(__name__) @@ -37,9 +37,18 @@ def _create_rerouter(func_name): ) else: destination = get_domain_from_id(group_id) - return getattr(self.transport_client, func_name)( + logger.info("Triggering call") + d = getattr(self.transport_client, func_name)( destination, group_id, *args, **kwargs ) + def h(failure): + failure.trap(HttpResponseException) + e = failure.value + if e.code >= 400 and e.code < 500: + raise SynapseError(e.code, e.msg) + failure.raiseException() + d.addErrback(h) + return d return f -- cgit 1.4.1 From 2a7b3439de5bab6c9bde199ba920b4094f37bc4d Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 26 Sep 2018 13:51:34 -0600 Subject: Changelog --- changelog.d/3969.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3969.bugfix diff --git a/changelog.d/3969.bugfix b/changelog.d/3969.bugfix new file mode 100644 index 0000000000..ca2759e91e --- /dev/null +++ b/changelog.d/3969.bugfix @@ -0,0 +1 @@ +Fix HTTP error response codes for federated group requests. -- cgit 1.4.1 From 82fa31799cdf5146495fa95634928a2a3f5070be Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 26 Sep 2018 14:01:02 -0600 Subject: Remove debugging statement --- synapse/handlers/groups_local.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index e36f22f128..68de8a8e53 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -37,7 +37,6 @@ def _create_rerouter(func_name): ) else: destination = get_domain_from_id(group_id) - logger.info("Triggering call") d = getattr(self.transport_client, func_name)( destination, group_id, *args, **kwargs ) -- cgit 1.4.1 From ae6ad4cf4190dbd2dadd72fc8131e4c139f8eb4a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 27 Sep 2018 11:22:25 +0100 Subject: docstrings and unittests for storage.state (#3958) I spent ages trying to figure out how I was going mad... 
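The confusion comes down to two superficially similar return shapes. A minimal sketch of the distinction the new docstrings pin down, with plain dicts and invented values standing in for the real store and events:

    # get_state_groups_ids(): state_group_id -> {(type, state_key): event_id}
    state_groups_ids = {
        42: {
            ("m.room.create", ""): "$create:example.org",
            ("m.room.name", ""): "$name:example.org",
        },
    }

    # get_state_groups(): state_group_id -> [event, ...], full events rather
    # than event ids (faked here as plain dicts)
    state_groups = {
        42: [
            {"event_id": "$create:example.org", "type": "m.room.create"},
            {"event_id": "$name:example.org", "type": "m.room.name"},
        ],
    }

    # iterating the first yields (type, state_key) -> event_id pairs...
    for key, event_id in state_groups_ids[42].items():
        print(key, "->", event_id)

    # ...while iterating the second yields event objects
    for event in state_groups[42]:
        print(event["event_id"])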
--- changelog.d/3958.misc | 1 + synapse/storage/state.py | 30 ++++++++++++++++++++++-------- tests/storage/test_state.py | 39 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 62 insertions(+), 8 deletions(-) create mode 100644 changelog.d/3958.misc diff --git a/changelog.d/3958.misc b/changelog.d/3958.misc new file mode 100644 index 0000000000..5931d06dcf --- /dev/null +++ b/changelog.d/3958.misc @@ -0,0 +1 @@ +Fix docstrings and add tests for state store methods diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 4b971efdba..3f4cbd61c4 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -255,7 +255,17 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): ) @defer.inlineCallbacks - def get_state_groups_ids(self, room_id, event_ids): + def get_state_groups_ids(self, _room_id, event_ids): + """Get the event IDs of all the state for the state groups for the given events + + Args: + _room_id (str): id of the room for these events + event_ids (iterable[str]): ids of the events + + Returns: + Deferred[dict[int, dict[tuple[str, str], str]]]: + dict of state_group_id -> (dict of (type, state_key) -> event id) + """ if not event_ids: defer.returnValue({}) @@ -270,7 +280,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): @defer.inlineCallbacks def get_state_ids_for_group(self, state_group): - """Get the state IDs for the given state group + """Get the event IDs of all the state in the given state group Args: state_group (int) @@ -286,7 +296,9 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): def get_state_groups(self, room_id, event_ids): """ Get the state groups for the given list of event_ids - The return value is a dict mapping group names to lists of events. + Returns: + Deferred[dict[int, list[EventBase]]]: + dict of state_group_id -> list of state events. """ if not event_ids: defer.returnValue({}) @@ -324,7 +336,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): member events (if True), or to exclude member events (if False) Returns: - dictionary state_group -> (dict of (type, state_key) -> event id) + Deferred[dict[int, dict[tuple[str, str], str]]]: + dict of state_group_id -> (dict of (type, state_key) -> event id) """ results = {} @@ -732,8 +746,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): If None, `types` filtering is applied to all events. Returns: - Deferred[dict[int, dict[(type, state_key), EventBase]]] - a dictionary mapping from state group to state dictionary. 
+ Deferred[dict[int, dict[tuple[str, str], str]]]: + dict of state_group_id -> (dict of (type, state_key) -> event id) """ if types: types = frozenset(types) diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index b910965932..b9c5b39d59 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -74,6 +74,45 @@ class StateStoreTestCase(tests.unittest.TestCase): self.assertEqual(s1[t].event_id, s2[t].event_id) self.assertEqual(len(s1), len(s2)) + @defer.inlineCallbacks + def test_get_state_groups_ids(self): + e1 = yield self.inject_state_event( + self.room, self.u_alice, EventTypes.Create, '', {} + ) + e2 = yield self.inject_state_event( + self.room, self.u_alice, EventTypes.Name, '', {"name": "test room"} + ) + + state_group_map = yield self.store.get_state_groups_ids(self.room, [e2.event_id]) + self.assertEqual(len(state_group_map), 1) + state_map = list(state_group_map.values())[0] + self.assertDictEqual( + state_map, + { + (EventTypes.Create, ''): e1.event_id, + (EventTypes.Name, ''): e2.event_id, + }, + ) + + @defer.inlineCallbacks + def test_get_state_groups(self): + e1 = yield self.inject_state_event( + self.room, self.u_alice, EventTypes.Create, '', {} + ) + e2 = yield self.inject_state_event( + self.room, self.u_alice, EventTypes.Name, '', {"name": "test room"} + ) + + state_group_map = yield self.store.get_state_groups( + self.room, [e2.event_id]) + self.assertEqual(len(state_group_map), 1) + state_list = list(state_group_map.values())[0] + + self.assertEqual( + {ev.event_id for ev in state_list}, + {e1.event_id, e2.event_id}, + ) + @defer.inlineCallbacks def test_get_state_for_event(self): -- cgit 1.4.1 From 4a15a3e4d539dcea9a4a57e7cd800a926f2a17c3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 27 Sep 2018 11:25:34 +0100 Subject: Include eventid in log lines when processing incoming federation transactions (#3959) when processing incoming transactions, it can be hard to see what's going on, because we process a bunch of stuff in parallel, and because we may end up recursively working our way through a chain of three or four events. This commit creates a way to use logcontexts to add the relevant event ids to the log lines. 
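Concretely, the helper derives a child context whose 'request' tag is the parent's tag plus the event id, so each log line emitted while handling a PDU identifies both the transaction and the event. A standalone sketch of that naming scheme, using a toy class rather than Synapse's real LoggingContext:

    # Toy stand-in for a logging context; only the 'request' tag behaviour
    # is modelled here, and the values are invented for illustration.
    class ToyContext(object):
        def __init__(self, request):
            self.request = request

        def nested(self, suffix):
            # mirrors nested_logging_context: parent request + "-" + suffix
            return ToyContext(self.request + "-" + suffix)

    txn = ToyContext(request="PUT-12345")       # the incoming transaction
    per_pdu = txn.nested("$someevent:server")   # child context for one PDU
    print(per_pdu.request)                      # PUT-12345-$someevent:server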
--- changelog.d/3959.feature | 1 + synapse/federation/federation_server.py | 32 ++++++++-------- synapse/handlers/federation.py | 65 ++++++++++++++++++++------------- synapse/util/logcontext.py | 41 +++++++++++++++++++-- tests/test_federation.py | 28 ++++++++------ tests/util/test_logcontext.py | 5 +++ 6 files changed, 115 insertions(+), 57 deletions(-) create mode 100644 changelog.d/3959.feature diff --git a/changelog.d/3959.feature b/changelog.d/3959.feature new file mode 100644 index 0000000000..b3a4f37a8d --- /dev/null +++ b/changelog.d/3959.feature @@ -0,0 +1 @@ +Include eventid in log lines when processing incoming federation transactions \ No newline at end of file diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 9a571e4fc7..819e8f7331 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -46,6 +46,7 @@ from synapse.replication.http.federation import ( from synapse.types import get_domain_from_id from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.caches.response_cache import ResponseCache +from synapse.util.logcontext import nested_logging_context from synapse.util.logutils import log_function # when processing incoming transactions, we try to handle multiple rooms in @@ -187,21 +188,22 @@ class FederationServer(FederationBase): for pdu in pdus_by_room[room_id]: event_id = pdu.event_id - try: - yield self._handle_received_pdu( - origin, pdu - ) - pdu_results[event_id] = {} - except FederationError as e: - logger.warn("Error handling PDU %s: %s", event_id, e) - pdu_results[event_id] = {"error": str(e)} - except Exception as e: - f = failure.Failure() - pdu_results[event_id] = {"error": str(e)} - logger.error( - "Failed to handle PDU %s: %s", - event_id, f.getTraceback().rstrip(), - ) + with nested_logging_context(event_id): + try: + yield self._handle_received_pdu( + origin, pdu + ) + pdu_results[event_id] = {} + except FederationError as e: + logger.warn("Error handling PDU %s: %s", event_id, e) + pdu_results[event_id] = {"error": str(e)} + except Exception as e: + f = failure.Failure() + pdu_results[event_id] = {"error": str(e)} + logger.error( + "Failed to handle PDU %s: %s", + event_id, f.getTraceback().rstrip(), + ) yield concurrently_execute( process_pdus_for_room, pdus_by_room.keys(), diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 2ccdc3bfa7..993546387c 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -339,14 +339,18 @@ class FederationHandler(BaseHandler): "[%s %s] Requesting state at missing prev_event %s", room_id, event_id, p, ) - state, got_auth_chain = ( - yield self.federation_client.get_state_for_room( - origin, room_id, p, + + with logcontext.nested_logging_context(p): + state, got_auth_chain = ( + yield self.federation_client.get_state_for_room( + origin, room_id, p, + ) ) - ) - auth_chains.update(got_auth_chain) - state_group = {(x.type, x.state_key): x.event_id for x in state} - state_groups.append(state_group) + auth_chains.update(got_auth_chain) + state_group = { + (x.type, x.state_key): x.event_id for x in state + } + state_groups.append(state_group) # Resolve any conflicting state def fetch(ev_ids): @@ -483,20 +487,21 @@ class FederationHandler(BaseHandler): "[%s %s] Handling received prev_event %s", room_id, event_id, ev.event_id, ) - try: - yield self.on_receive_pdu( - origin, - ev, - sent_to_us_directly=False, - ) - except FederationError as e: - if 
e.code == 403: - logger.warn( - "[%s %s] Received prev_event %s failed history check.", - room_id, event_id, ev.event_id, + with logcontext.nested_logging_context(ev.event_id): + try: + yield self.on_receive_pdu( + origin, + ev, + sent_to_us_directly=False, ) - else: - raise + except FederationError as e: + if e.code == 403: + logger.warn( + "[%s %s] Received prev_event %s failed history check.", + room_id, event_id, ev.event_id, + ) + else: + raise @defer.inlineCallbacks def _process_received_pdu(self, origin, event, state, auth_chain): @@ -1135,7 +1140,8 @@ class FederationHandler(BaseHandler): try: logger.info("Processing queued PDU %s which was received " "while we were joining %s", p.event_id, p.room_id) - yield self.on_receive_pdu(origin, p, sent_to_us_directly=True) + with logcontext.nested_logging_context(p.event_id): + yield self.on_receive_pdu(origin, p, sent_to_us_directly=True) except Exception as e: logger.warn( "Error handling queued PDU %s from %s: %s", @@ -1581,15 +1587,22 @@ class FederationHandler(BaseHandler): Notifies about the events where appropriate. """ - contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults( - [ - logcontext.run_in_background( - self._prep_event, + + @defer.inlineCallbacks + def prep(ev_info): + event = ev_info["event"] + with logcontext.nested_logging_context(suffix=event.event_id): + res = yield self._prep_event( origin, - ev_info["event"], + event, state=ev_info.get("state"), auth_events=ev_info.get("auth_events"), ) + defer.returnValue(res) + + contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults( + [ + logcontext.run_in_background(prep, ev_info) for ev_info in event_infos ], consumeErrors=True, )) diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index a0c2d37610..89224b26cc 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -200,7 +200,7 @@ class LoggingContext(object): sentinel = Sentinel() - def __init__(self, name=None, parent_context=None): + def __init__(self, name=None, parent_context=None, request=None): self.previous_context = LoggingContext.current_context() self.name = name @@ -218,6 +218,13 @@ class LoggingContext(object): self.parent_context = parent_context + if self.parent_context is not None: + self.parent_context.copy_to(self) + + if request is not None: + # the request param overrides the request from the parent context + self.request = request + def __str__(self): return "%s@%x" % (self.name, id(self)) @@ -256,9 +263,6 @@ class LoggingContext(object): ) self.alive = True - if self.parent_context is not None: - self.parent_context.copy_to(self) - return self def __exit__(self, type, value, traceback): @@ -439,6 +443,35 @@ class PreserveLoggingContext(object): ) +def nested_logging_context(suffix, parent_context=None): + """Creates a new logging context as a child of another. + + The nested logging context will have a 'request' made up of the parent context's + request, plus the given suffix. + + CPU/db usage stats will be added to the parent context's on exit. + + Normal usage looks like: + + with nested_logging_context(suffix): + # ... do stuff + + Args: + suffix (str): suffix to add to the parent context's 'request'. + parent_context (LoggingContext|None): parent context. Will use the current context + if None. + + Returns: + LoggingContext: new logging context. 
+ """ + if parent_context is None: + parent_context = LoggingContext.current_context() + return LoggingContext( + parent_context=parent_context, + request=parent_context.request + "-" + suffix, + ) + + def preserve_fn(f): """Function decorator which wraps the function with run_in_background""" def g(*args, **kwargs): diff --git a/tests/test_federation.py b/tests/test_federation.py index 2540604fcc..ff55c7a627 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -6,6 +6,7 @@ from twisted.internet.defer import maybeDeferred, succeed from synapse.events import FrozenEvent from synapse.types import Requester, UserID from synapse.util import Clock +from synapse.util.logcontext import LoggingContext from tests import unittest from tests.server import ThreadedMemoryReactorClock, setup_test_homeserver @@ -117,9 +118,10 @@ class MessageAcceptTests(unittest.TestCase): } ) - d = self.handler.on_receive_pdu( - "test.serv", lying_event, sent_to_us_directly=True - ) + with LoggingContext(request="lying_event"): + d = self.handler.on_receive_pdu( + "test.serv", lying_event, sent_to_us_directly=True + ) # Step the reactor, so the database fetches come back self.reactor.advance(1) @@ -209,11 +211,12 @@ class MessageAcceptTests(unittest.TestCase): } ) - d = self.handler.on_receive_pdu( - "test.serv", good_event, sent_to_us_directly=True - ) - self.reactor.advance(1) - self.assertEqual(self.successResultOf(d), None) + with LoggingContext(request="good_event"): + d = self.handler.on_receive_pdu( + "test.serv", good_event, sent_to_us_directly=True + ) + self.reactor.advance(1) + self.assertEqual(self.successResultOf(d), None) bad_event = FrozenEvent( { @@ -230,10 +233,11 @@ class MessageAcceptTests(unittest.TestCase): } ) - d = self.handler.on_receive_pdu( - "test.serv", bad_event, sent_to_us_directly=True - ) - self.reactor.advance(1) + with LoggingContext(request="bad_event"): + d = self.handler.on_receive_pdu( + "test.serv", bad_event, sent_to_us_directly=True + ) + self.reactor.advance(1) extrem = maybeDeferred( self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py index 4633db77b3..8adaee3c8d 100644 --- a/tests/util/test_logcontext.py +++ b/tests/util/test_logcontext.py @@ -159,6 +159,11 @@ class LoggingContextTestCase(unittest.TestCase): self.assertEqual(r, "bum") self._check_test_key("one") + def test_nested_logging_context(self): + with LoggingContext(request="foo"): + nested_context = logcontext.nested_logging_context(suffix="bar") + self.assertEqual(nested_context.request, "foo-bar") + # a function which returns a deferred which has been "called", but # which had a function which returned another incomplete deferred on -- cgit 1.4.1 From e3c159863d72ba1628394497bba45dd96b9cc1ac Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 08:09:07 +0100 Subject: Clarifications in FederationHandler * add some comments on things that look a bit bogus * rename this `state` variable to avoid confusion with the `state` used elsewhere in this function. (There was no actual conflict, but it was a confusing bit of spaghetti.) 
--- synapse/handlers/federation.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 128926e719..6793f9b6c6 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -341,14 +341,23 @@ class FederationHandler(BaseHandler): ) with logcontext.nested_logging_context(p): - state, got_auth_chain = ( + # XXX if any of the missing prevs share missing state or auth + # events, we'll end up requesting those missing events for + # *each* missing prev, contributing to the hammering of /event + # as per https://github.com/matrix-org/synapse/issues/2164. + remote_state, got_auth_chain = ( yield self.federation_client.get_state_for_room( origin, room_id, p, ) ) + + # XXX hrm I'm not convinced that duplicate events will compare + # for equality, so I'm not sure this does what the author + # hoped. auth_chains.update(got_auth_chain) + state_group = { - (x.type, x.state_key): x.event_id for x in state + (x.type, x.state_key): x.event_id for x in remote_state } state_groups.append(state_group) -- cgit 1.4.1 From ad8e137062deb70ab39bcade147e943abbeb6839 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 17:43:30 +0100 Subject: changelog --- changelog.d/3967.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3967.misc diff --git a/changelog.d/3967.misc b/changelog.d/3967.misc new file mode 100644 index 0000000000..dc808aec73 --- /dev/null +++ b/changelog.d/3967.misc @@ -0,0 +1 @@ +Clarifications in FederationHandler -- cgit 1.4.1 From 28223841e05a77a44ec2c0b29d1e930c68974913 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 19:17:36 +0100 Subject: more comments --- synapse/federation/federation_client.py | 2 -- synapse/handlers/federation.py | 7 +++---- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 8bf1ad0c1f..d05ed91d64 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -209,8 +209,6 @@ class FederationClient(FederationBase): Will attempt to get the PDU from each destination in the list until one succeeds. - This will persist the PDU locally upon receipt. - Args: destinations (list): Which home servers to query event_id (str): event to fetch diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 6793f9b6c6..38bebbf598 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -341,10 +341,9 @@ class FederationHandler(BaseHandler): ) with logcontext.nested_logging_context(p): - # XXX if any of the missing prevs share missing state or auth - # events, we'll end up requesting those missing events for - # *each* missing prev, contributing to the hammering of /event - # as per https://github.com/matrix-org/synapse/issues/2164. + # note that if any of the missing prevs share missing state or + # auth events, the requests to fetch those events are deduped + # by the get_pdu_cache in federation_client. 
remote_state, got_auth_chain = ( yield self.federation_client.get_state_for_room( origin, room_id, p, -- cgit 1.4.1 From a215b698c43f4cfe8a2fdb9c160c8ecd1c1297c5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 09:52:56 +0100 Subject: Fix "unhashable type: 'list'" exception in federation handling get_state_groups returns a map from state_group_id to a list of FrozenEvents, so was very much the wrong thing to be putting as one of the entries in the list passed to resolve_events_with_factory (which expects maps from (event_type, state_key) to event id). We actually want get_state_groups_ids().values() rather than get_state_groups(). This fixes the main problem in #3923, but there are other problems with this bit of code which get discovered once you do so. --- synapse/handlers/federation.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 38bebbf598..2d6b8edec4 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -106,7 +106,7 @@ class FederationHandler(BaseHandler): self.hs = hs - self.store = hs.get_datastore() + self.store = hs.get_datastore() # type: synapse.storage.DataStore self.federation_client = hs.get_federation_client() self.state_handler = hs.get_state_handler() self.server_name = hs.hostname @@ -325,12 +325,17 @@ class FederationHandler(BaseHandler): # Calculate the state of the previous events, and # de-conflict them to find the current state. - state_groups = [] auth_chains = set() try: # Get the state of the events we know about - ours = yield self.store.get_state_groups(room_id, list(seen)) - state_groups.append(ours) + ours = yield self.store.get_state_groups_ids(room_id, seen) + + # state_maps is a list of mappings from (type, state_key) to event_id + # type: list[dict[tuple[str, str], str]] + state_maps = list(ours.values()) + + # we don't need this any more, let's delete it. + del ours # Ask the remote server for the states we don't # know about @@ -355,10 +360,10 @@ class FederationHandler(BaseHandler): # hoped. auth_chains.update(got_auth_chain) - state_group = { + remote_state_map = { (x.type, x.state_key): x.event_id for x in remote_state } - state_groups.append(state_group) + state_maps.append(remote_state_map) # Resolve any conflicting state def fetch(ev_ids): @@ -368,7 +373,7 @@ class FederationHandler(BaseHandler): room_version = yield self.store.get_room_version(room_id) state_map = yield resolve_events_with_factory( - room_version, state_groups, {event_id: pdu}, fetch + room_version, state_maps, {event_id: pdu}, fetch, ) state = (yield self.store.get_events(state_map.values())).values() -- cgit 1.4.1 From bd61c82bdf97460a33080bcb6b2c836616a3b415 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 12:16:13 +0100 Subject: Include state from remote servers in pdu handling If we've fetched state events from remote servers in order to resolve the state for a new event, we need to actually pass those events into resolve_events_with_factory (so that it can do the state res) and then persist the ones we need - otherwise other bits of the codebase get confused about why we have state groups pointing to non-existent events. 
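
To make the shape change above concrete, an illustrative comparison (state group ids and event ids invented):

    # get_state_groups(...) maps state_group_id to a *list* of FrozenEvents,
    # the wrong shape for resolve_events_with_factory:
    #
    #     {42: [<FrozenEvent m.room.create>, <FrozenEvent m.room.member>]}
    #
    # get_state_groups_ids(...) maps state_group_id to a dict from
    # (event_type, state_key) to event_id; its values() are what state
    # resolution expects, one map per branch:
    state_maps = [
        {
            ("m.room.create", ""): "$create:example.com",
            ("m.room.member", "@alice:example.com"): "$join1:example.com",
        },
        {
            ("m.room.create", ""): "$create:example.com",
            ("m.room.member", "@alice:example.com"): "$join2:example.com",
        },
    ]
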
--- synapse/handlers/federation.py | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 2d6b8edec4..cdad565d04 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -326,6 +326,9 @@ class FederationHandler(BaseHandler): # Calculate the state of the previous events, and # de-conflict them to find the current state. auth_chains = set() + event_map = { + event_id: pdu, + } try: # Get the state of the events we know about ours = yield self.store.get_state_groups_ids(room_id, seen) @@ -365,18 +368,30 @@ class FederationHandler(BaseHandler): } state_maps.append(remote_state_map) + for x in remote_state: + event_map[x.event_id] = x + # Resolve any conflicting state + @defer.inlineCallbacks def fetch(ev_ids): - return self.store.get_events( - ev_ids, get_prev_content=False, check_redacted=False + fetched = yield self.store.get_events( + ev_ids, get_prev_content=False, check_redacted=False, ) + # add any events we fetch here to the `event_map` so that we + # can use them to build the state event list below. + event_map.update(fetched) + defer.returnValue(fetched) room_version = yield self.store.get_room_version(room_id) state_map = yield resolve_events_with_factory( - room_version, state_maps, {event_id: pdu}, fetch, + room_version, state_maps, event_map, fetch, ) - state = (yield self.store.get_events(state_map.values())).values() + # we need to give _process_received_pdu the actual state events + # rather than event ids, so generate that now. + state = [ + event_map[e] for e in six.itervalues(state_map) + ] auth_chain = list(auth_chains) except Exception: logger.warn( -- cgit 1.4.1 From 333bee27f53916bf5354a39a79aa468967730326 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 19:49:59 +0100 Subject: Include event when resolving state for missing prevs If we have a forward extremity for a room as `E`, and you receive `A`, `B`, s.t. `A -> B -> E`, and `B` also points to an unknown event `X`, then we need to do state res between `X` and `E`. When that happens, we need to make sure we include `X` in the state that goes into the state res alg. Fixes #3934. --- synapse/handlers/federation.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index cdad565d04..d05b63673f 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -323,8 +323,8 @@ class FederationHandler(BaseHandler): affected=pdu.event_id, ) - # Calculate the state of the previous events, and - # de-conflict them to find the current state. + # Calculate the state after each of the previous events, and + # resolve them to find the correct state at the current event. auth_chains = set() event_map = { event_id: pdu, @@ -358,6 +358,20 @@ class FederationHandler(BaseHandler): ) ) + # we want the state *after* p; get_state_for_room returns the + # state *before* p. + remote_event = yield self.federation_client.get_pdu( + [origin], p, outlier=True, + ) + + if remote_event is None: + raise Exception( + "Unable to get missing prev_event %s" % (p, ) + ) + + if remote_event.is_state(): + remote_state.append(remote_event) + # XXX hrm I'm not convinced that duplicate events will compare # for equality, so I'm not sure this does what the author # hoped. 
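
The scenario from the commit message is easier to see drawn out; a toy illustration (event ids invented) of why the unknown prev event must itself be included:

    #     E (our forward extremity)     X (unknown state event)
    #                  \               /
    #                   B  -- prev_events: [E, X]
    #                   |
    #                   A  -- prev_events: [B]
    prev_events = {
        "$A:example.com": ["$B:example.com"],
        "$B:example.com": ["$E:example.com", "$X:example.com"],
    }
    # get_state_for_room(origin, room_id, "$X:example.com") returns the
    # state *before* X, so X is fetched as an outlier via get_pdu and, if
    # it is a state event, appended to remote_state before resolution;
    # otherwise the resolved state would be missing X itself.
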
-- cgit 1.4.1 From 948a6d877606deb4ccc8e42fd25f50e2edcc068d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 19:56:51 +0100 Subject: changelog --- changelog.d/3968.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3968.bugfix diff --git a/changelog.d/3968.bugfix b/changelog.d/3968.bugfix new file mode 100644 index 0000000000..18d43cd64e --- /dev/null +++ b/changelog.d/3968.bugfix @@ -0,0 +1 @@ +Fix exceptions when processing incoming events over federation \ No newline at end of file -- cgit 1.4.1 From 0d36fe35634cc44212073f2f6fab764ee7d8c0a9 Mon Sep 17 00:00:00 2001 From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com> Date: Tue, 25 Sep 2018 09:43:21 +0100 Subject: Build and push docker image to hub --- .circleci/config.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 521aca22ef..ff9d2a0c2d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,5 +1,12 @@ version: 2 jobs: + dockerhubupload: + machine: true + steps: + - checkout + - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:$CIRCLE_TAG . + - run: docker login --username matrixdotorg --password $DOCKER_HUB_PASSWORD + - run: docker push sytestpy2: machine: true steps: @@ -131,3 +138,7 @@ workflows: filters: branches: ignore: /develop|master|release-.*/ + - dockerhub: + filters: + tags: + only: /v[0-9].[0-9]+.[0-9]+(.[0-9]+)?/ -- cgit 1.4.1 From 3852c2bc3900813affe7adf761238694fb6a1fd4 Mon Sep 17 00:00:00 2001 From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com> Date: Thu, 27 Sep 2018 12:00:01 +0100 Subject: Fix typo --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index ff9d2a0c2d..e404818a5e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -138,7 +138,7 @@ workflows: filters: branches: ignore: /develop|master|release-.*/ - - dockerhub: + - dockerhubupload: filters: tags: only: /v[0-9].[0-9]+.[0-9]+(.[0-9]+)?/ -- cgit 1.4.1 From 73a089f461dce236ce84054e591eff873200dbbb Mon Sep 17 00:00:00 2001 From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com> Date: Tue, 25 Sep 2018 10:29:38 +0100 Subject: Make username configurable in the UI too --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index e404818a5e..9a43477185 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -5,7 +5,7 @@ jobs: steps: - checkout - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:$CIRCLE_TAG . 
- - run: docker login --username matrixdotorg --password $DOCKER_HUB_PASSWORD + - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD - run: docker push sytestpy2: machine: true -- cgit 1.4.1 From a3c11f73204857a363f78a26412f5aa46adf8650 Mon Sep 17 00:00:00 2001 From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com> Date: Tue, 25 Sep 2018 10:37:08 +0100 Subject: Also, don't run this job on any branches --- .circleci/config.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 9a43477185..cb329aa241 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -141,4 +141,6 @@ workflows: - dockerhubupload: filters: tags: - only: /v[0-9].[0-9]+.[0-9]+(.[0-9]+)?/ + only: /^v[0-9].[0-9]+.[0-9]+(.[0-9]+)?/ + branches: + ignore: /.*/ -- cgit 1.4.1 From 8607397ea763cedaafe51124063504436a268fa9 Mon Sep 17 00:00:00 2001 From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com> Date: Thu, 27 Sep 2018 11:44:21 +0100 Subject: Make a ":latest" tag, and a SHA1 commit ID one too. Latest is horrible and makes debugging what has happened anywhere a nightmare. We push a latest because of demand for it, but we'll also push a SHA1 commit id so those wanting to know what they're running (and be able to roll back if required) can use those instead. Note that latest here is defined as "most recent master commit" not "most recent released version", as the actual semantics of making latest correct while still being able to build bugfixed releases of previous versions is just ARGH. So we define it as "master" not "latest release". --- .circleci/config.yml | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index cb329aa241..3cb14793fc 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,12 +1,21 @@ version: 2 jobs: - dockerhubupload: + dockerhubuploadrelease: machine: true steps: - checkout - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:$CIRCLE_TAG . - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD - - run: docker push + - run: docker push matrixdotorg/synapse:$CIRCLE_TAG + dockerhubuploadlatest: + machine: true + steps: + - checkout + - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:$CIRCLE_SHA1 . 
+ - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD + - run: docker tag matrixdotorg/synapse:$CIRCLE_SHA1 matrixdotorg/synapse:latest + - run: docker push matrixdotorg/synapse:$CIRCLE_SHA1 + - run: docker push matrixdotorg/synapse:latest sytestpy2: machine: true steps: @@ -138,9 +147,13 @@ workflows: filters: branches: ignore: /develop|master|release-.*/ - - dockerhubupload: + - dockerhubuploadrelease: filters: tags: only: /^v[0-9].[0-9]+.[0-9]+(.[0-9]+)?/ branches: ignore: /.*/ + - dockerhubuploadlatest: + filters: + branches: + only: master -- cgit 1.4.1 From 74bbdd0412b8e35994c82cbbccad4af2122039fb Mon Sep 17 00:00:00 2001 From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com> Date: Thu, 27 Sep 2018 11:56:18 +0100 Subject: Do the changelog.d dance --- changelog.d/3946.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3946.misc diff --git a/changelog.d/3946.misc b/changelog.d/3946.misc new file mode 100644 index 0000000000..803857a297 --- /dev/null +++ b/changelog.d/3946.misc @@ -0,0 +1 @@ +Automate pushes to docker hub -- cgit 1.4.1 From dc5db01ff25dac6ec74ceea9b4d815a8c43cd7dd Mon Sep 17 00:00:00 2001 From: Schnuffle Date: Thu, 27 Sep 2018 13:38:50 +0200 Subject: Replaced all occurences of e.message with str(e) Signed-off-by: Schnuffle --- scripts-dev/dump_macaroon.py | 2 +- synapse/api/filtering.py | 2 +- synapse/app/__init__.py | 2 +- synapse/app/appservice.py | 2 +- synapse/app/client_reader.py | 2 +- synapse/app/event_creator.py | 2 +- synapse/app/federation_reader.py | 2 +- synapse/app/federation_sender.py | 2 +- synapse/app/frontend_proxy.py | 2 +- synapse/app/homeserver.py | 4 ++-- synapse/app/media_repository.py | 2 +- synapse/app/pusher.py | 2 +- synapse/app/synchrotron.py | 2 +- synapse/app/user_dir.py | 2 +- synapse/config/__main__.py | 2 +- synapse/handlers/e2e_keys.py | 2 +- synapse/handlers/profile.py | 2 +- tests/unittest.py | 2 +- 18 files changed, 19 insertions(+), 19 deletions(-) diff --git a/scripts-dev/dump_macaroon.py b/scripts-dev/dump_macaroon.py index 6e45be75d6..fcc5568835 100755 --- a/scripts-dev/dump_macaroon.py +++ b/scripts-dev/dump_macaroon.py @@ -21,4 +21,4 @@ try: verifier.verify(macaroon, key) print "Signature is correct" except Exception as e: - print e.message + print str(e) diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index a31a9a17e0..eed8c67e6a 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -226,7 +226,7 @@ class Filtering(object): jsonschema.validate(user_filter_json, USER_FILTER_SCHEMA, format_checker=FormatChecker()) except jsonschema.ValidationError as e: - raise SynapseError(400, e.message) + raise SynapseError(400, str(e)) class FilterCollection(object): diff --git a/synapse/app/__init__.py b/synapse/app/__init__.py index 3b6b9368b8..c3afcc573b 100644 --- a/synapse/app/__init__.py +++ b/synapse/app/__init__.py @@ -24,7 +24,7 @@ try: python_dependencies.check_requirements() except python_dependencies.MissingRequirementError as e: message = "\n".join([ - "Missing Requirement: %s" % (e.message,), + "Missing Requirement: %s" % (str(e),), "To install run:", " pip install --upgrade --force \"%s\"" % (e.dependency,), "", diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index 02039f7e79..8559e141af 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -136,7 +136,7 @@ def start(config_options): "Synapse appservice", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + 
sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.appservice" diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 4c73c637bb..76aed8c60a 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -153,7 +153,7 @@ def start(config_options): "Synapse client reader", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.client_reader" diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index bc82197b2a..9060ab14f6 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -169,7 +169,7 @@ def start(config_options): "Synapse event creator", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.event_creator" diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 18ca71ef99..228a297fb8 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -140,7 +140,7 @@ def start(config_options): "Synapse federation reader", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.federation_reader" diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index 6501c57792..e9a99d76e1 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -160,7 +160,7 @@ def start(config_options): "Synapse federation sender", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.federation_sender" diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index b076fbe522..fc4b25de1c 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -228,7 +228,7 @@ def start(config_options): "Synapse frontend proxy", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.frontend_proxy" diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 8c5d858b0b..a98fdbd210 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -301,7 +301,7 @@ class SynapseHomeServer(HomeServer): try: database_engine.check_database(db_conn.cursor()) except IncorrectDatabaseSetup as e: - quit_with_error(e.message) + quit_with_error(str(e)) # Gauges to expose monthly active user control metrics @@ -328,7 +328,7 @@ def setup(config_options): config_options, ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) if not config: diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index 992d182dba..acc0487adc 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -133,7 +133,7 @@ def start(config_options): "Synapse media repository", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.media_repository" diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 
2ec4c7defb..630dcda478 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -191,7 +191,7 @@ def start(config_options): "Synapse pusher", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.pusher" diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index df81b7bcbe..9a7fc6ee9d 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -410,7 +410,7 @@ def start(config_options): "Synapse synchrotron", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.synchrotron" diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index b383e79c1c..0a5f62b509 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -188,7 +188,7 @@ def start(config_options): "Synapse user directory", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.user_dir" diff --git a/synapse/config/__main__.py b/synapse/config/__main__.py index 58c97a70af..8fccf573ee 100644 --- a/synapse/config/__main__.py +++ b/synapse/config/__main__.py @@ -25,7 +25,7 @@ if __name__ == "__main__": try: config = HomeServerConfig.load_config("", sys.argv[3:]) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) print (getattr(config, key)) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 578e9250fb..9dc46aa15f 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -341,7 +341,7 @@ class E2eKeysHandler(object): def _exception_to_failure(e): if isinstance(e, CodeMessageException): return { - "status": e.code, "message": e.message, + "status": e.code, "message": str(e), } if isinstance(e, NotRetryingDestination): diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 75b8b7ce6a..f284d5a385 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -278,7 +278,7 @@ class BaseProfileHandler(BaseHandler): except Exception as e: logger.warn( "Failed to update join event for room %s - %s", - room_id, str(e.message) + room_id, str(e) ) diff --git a/tests/unittest.py b/tests/unittest.py index ef905e6389..043710afaf 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -121,7 +121,7 @@ class TestCase(unittest.TestCase): try: self.assertEquals(attrs[key], getattr(obj, key)) except AssertionError as e: - raise (type(e))(e.message + " for '.%s'" % key) + raise (type(e))(str(e) + " for '.%s'" % key) def assert_dict(self, required, actual): """Does a partial assert of a dict. -- cgit 1.4.1 From a873896096bd05b608d7db80c9f0c12090877644 Mon Sep 17 00:00:00 2001 From: Schnuffle Date: Thu, 27 Sep 2018 14:01:44 +0200 Subject: Added changelog fragment --- changelog.d/3970.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3970.bugfix diff --git a/changelog.d/3970.bugfix b/changelog.d/3970.bugfix new file mode 100644 index 0000000000..c4caa31ede --- /dev/null +++ b/changelog.d/3970.bugfix @@ -0,0 +1 @@ +Replaced all occurences of e.message with str(e)Replaced all occurences of e.message with str(e). 
Contributed by Schnuffle -- cgit 1.4.1 From 484a9b8c81a2315a05a00c7094b7f5fb4a9d5794 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 27 Sep 2018 12:48:06 +0100 Subject: Remove redundant, failing, test This test didn't do what it claimed to do, and what it claimed to do was the same as test_cant_hide_direct_ancestors anyway. This stuff is tested by sytest anyway. --- tests/test_federation.py | 106 ----------------------------------------------- 1 file changed, 106 deletions(-) diff --git a/tests/test_federation.py b/tests/test_federation.py index ff55c7a627..952a0a7b51 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -141,109 +141,3 @@ class MessageAcceptTests(unittest.TestCase): self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id ) self.assertEqual(self.successResultOf(extrem)[0], "$join:test.serv") - - def test_cant_hide_past_history(self): - """ - If you send a message, you must be able to provide the direct - prev_events that said event references. - """ - - def post_json(destination, path, data, headers=None, timeout=0): - if path.startswith("/_matrix/federation/v1/get_missing_events/"): - return { - "events": [ - { - "room_id": self.room_id, - "sender": "@baduser:test.serv", - "event_id": "three:test.serv", - "depth": 1000, - "origin_server_ts": 1, - "type": "m.room.message", - "origin": "test.serv", - "content": "hewwo?", - "auth_events": [], - "prev_events": [("four:test.serv", {})], - } - ] - } - - self.http_client.post_json = post_json - - def get_json(destination, path, args, headers=None): - if path.startswith("/_matrix/federation/v1/state_ids/"): - d = self.successResultOf( - self.homeserver.datastore.get_state_ids_for_event("one:test.serv") - ) - - return succeed( - { - "pdu_ids": [ - y - for x, y in d.items() - if x == ("m.room.member", "@us:test") - ], - "auth_chain_ids": list(d.values()), - } - ) - - self.http_client.get_json = get_json - - # Figure out what the most recent event is - most_recent = self.successResultOf( - maybeDeferred( - self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id - ) - )[0] - - # Make a good event - good_event = FrozenEvent( - { - "room_id": self.room_id, - "sender": "@baduser:test.serv", - "event_id": "one:test.serv", - "depth": 1000, - "origin_server_ts": 1, - "type": "m.room.message", - "origin": "test.serv", - "content": "hewwo?", - "auth_events": [], - "prev_events": [(most_recent, {})], - } - ) - - with LoggingContext(request="good_event"): - d = self.handler.on_receive_pdu( - "test.serv", good_event, sent_to_us_directly=True - ) - self.reactor.advance(1) - self.assertEqual(self.successResultOf(d), None) - - bad_event = FrozenEvent( - { - "room_id": self.room_id, - "sender": "@baduser:test.serv", - "event_id": "two:test.serv", - "depth": 1000, - "origin_server_ts": 1, - "type": "m.room.message", - "origin": "test.serv", - "content": "hewwo?", - "auth_events": [], - "prev_events": [("one:test.serv", {}), ("three:test.serv", {})], - } - ) - - with LoggingContext(request="bad_event"): - d = self.handler.on_receive_pdu( - "test.serv", bad_event, sent_to_us_directly=True - ) - self.reactor.advance(1) - - extrem = maybeDeferred( - self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id - ) - self.assertEqual(self.successResultOf(extrem)[0], "two:test.serv") - - state = self.homeserver.get_state_handler().get_current_state_ids(self.room_id) - self.reactor.advance(1) - self.assertIn(("m.room.member", "@us:test"), self.successResultOf(state).keys()) -- cgit 1.4.1 
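
Condensed, the receive-PDU skeleton that both the deleted test and the surviving test_cant_hide_direct_ancestors rely on looks roughly like this (fixture details elided; event stands for any FrozenEvent built as above):

    with LoggingContext(request="incoming_event"):
        d = self.handler.on_receive_pdu(
            "test.serv", event, sent_to_us_directly=True
        )

    # step the mocked reactor so the stubbed database and federation
    # fetches resolve, then inspect the deferred and the room extremities
    self.reactor.advance(1)
    self.assertEqual(self.successResultOf(d), None)
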
From 861c063ebc8989e6b25d2e00e7aedf0ecc780f41 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 27 Sep 2018 22:47:01 +1000 Subject: Update 3970.bugfix --- changelog.d/3970.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/3970.bugfix b/changelog.d/3970.bugfix index c4caa31ede..5625315497 100644 --- a/changelog.d/3970.bugfix +++ b/changelog.d/3970.bugfix @@ -1 +1 @@ -Replaced all occurences of e.message with str(e)Replaced all occurences of e.message with str(e). Contributed by Schnuffle +Replaced all occurences of e.message with str(e). Contributed by Schnuffle -- cgit 1.4.1 From b3064532d005e08a3f5ec0c2a2decf688208f5e6 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 27 Sep 2018 23:21:54 +1000 Subject: Run our oldest supported configuration in CI (#3952) --- .travis.yml | 3 +++ changelog.d/3952.misc | 1 + synapse/python_dependencies.py | 27 ++++++++++++++------------- tox.ini | 20 ++++++++++++++++++++ 4 files changed, 38 insertions(+), 13 deletions(-) create mode 100644 changelog.d/3952.misc diff --git a/.travis.yml b/.travis.yml index b6faca4b92..2077f6af72 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,6 +20,9 @@ matrix: - python: 2.7 env: TOX_ENV=py27 + - python: 2.7 + env: TOX_ENV=py27-old + - python: 2.7 env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4" services: diff --git a/changelog.d/3952.misc b/changelog.d/3952.misc new file mode 100644 index 0000000000..015e4a43e6 --- /dev/null +++ b/changelog.d/3952.misc @@ -0,0 +1 @@ +Run the test suite on the oldest supported versions of our dependencies in CI. \ No newline at end of file diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index c779f69fa0..0f339a0320 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -33,31 +33,32 @@ logger = logging.getLogger(__name__) # [2] https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-dependencies REQUIREMENTS = { "jsonschema>=2.5.1": ["jsonschema>=2.5.1"], - "frozendict>=0.4": ["frozendict"], + "frozendict>=1": ["frozendict"], "unpaddedbase64>=1.1.0": ["unpaddedbase64>=1.1.0"], "canonicaljson>=1.1.3": ["canonicaljson>=1.1.3"], "signedjson>=1.0.0": ["signedjson>=1.0.0"], "pynacl>=1.2.1": ["nacl>=1.2.1", "nacl.bindings"], - "service_identity>=1.0.0": ["service_identity>=1.0.0"], + "service_identity>=16.0.0": ["service_identity>=16.0.0"], "Twisted>=17.1.0": ["twisted>=17.1.0"], "treq>=15.1": ["treq>=15.1"], # Twisted has required pyopenssl 16.0 since about Twisted 16.6. 
"pyopenssl>=16.0.0": ["OpenSSL>=16.0.0"], - "pyyaml": ["yaml"], - "pyasn1": ["pyasn1"], - "daemonize": ["daemonize"], - "bcrypt": ["bcrypt>=3.1.0"], - "pillow": ["PIL"], - "pydenticon": ["pydenticon"], - "sortedcontainers": ["sortedcontainers"], - "pysaml2>=3.0.0": ["saml2>=3.0.0"], - "pymacaroons-pynacl": ["pymacaroons"], + "pyyaml>=3.11": ["yaml"], + "pyasn1>=0.1.9": ["pyasn1"], + "pyasn1-modules>=0.0.7": ["pyasn1_modules"], + "daemonize>=2.3.1": ["daemonize"], + "bcrypt>=3.1.0": ["bcrypt>=3.1.0"], + "pillow>=3.1.2": ["PIL"], + "pydenticon>=0.2": ["pydenticon"], + "sortedcontainers>=1.4.4": ["sortedcontainers"], + "pysaml2>=3.0.0": ["saml2"], + "pymacaroons-pynacl>=0.9.3": ["pymacaroons"], "msgpack-python>=0.3.0": ["msgpack"], "phonenumbers>=8.2.0": ["phonenumbers"], - "six": ["six"], - "prometheus_client": ["prometheus_client"], + "six>=1.10": ["six"], + "prometheus_client>=0.0.18": ["prometheus_client"], # we use attr.s(slots), which arrived in 16.0.0 "attrs>=16.0.0": ["attr>=16.0.0"], diff --git a/tox.ini b/tox.ini index e4db563b4b..87b5e4782d 100644 --- a/tox.ini +++ b/tox.ini @@ -64,6 +64,26 @@ setenv = {[base]setenv} SYNAPSE_POSTGRES = 1 +# A test suite for the oldest supported versions of Python libraries, to catch +# any uses of APIs not available in them. +[testenv:py27-old] +skip_install=True +deps = + # Old automat version for Twisted + Automat == 0.3.0 + + mock + lxml +commands = + /usr/bin/find "{toxinidir}" -name '*.pyc' -delete + # Make all greater-thans equals so we test the oldest version of our direct + # dependencies, but make the pyopenssl 17.0, which can work against an + # OpenSSL 1.1 compiled cryptography (as older ones don't compile on Travis). + /bin/sh -c 'python -m synapse.python_dependencies | sed -e "s/>=/==/g" -e "s/psycopg2==2.6//" -e "s/pyopenssl==16.0.0/pyopenssl==17.0.0/" | xargs pip install' + # Install Synapse itself. This won't update any libraries. + pip install -e . 
+ {envbindir}/trial {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:} + [testenv:py35] usedevelop=true -- cgit 1.4.1 From 07340cdacad901a55abd0811a1fca86061b752bd Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Fri, 28 Sep 2018 01:42:53 +0100 Subject: untested stab at autocreating autojoin rooms --- synapse/config/registration.py | 4 ++++ synapse/handlers/register.py | 18 ++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 0fb964eb67..686c7fa9f7 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -44,6 +44,7 @@ class RegistrationConfig(Config): ) self.auto_join_rooms = config.get("auto_join_rooms", []) + self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", true) def default_config(self, **kwargs): registration_shared_secret = random_string_with_symbols(50) @@ -98,6 +99,9 @@ class RegistrationConfig(Config): # to these rooms #auto_join_rooms: # - "#example:example.com" + + # Have first user on server autocreate autojoin rooms + autocreate_auto_join_rooms: true """ % locals() def add_arguments(self, parser): diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index da914c46ff..0e5337d26c 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -50,6 +50,8 @@ class RegistrationHandler(BaseHandler): self._auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() + self._room_creation_handler = hs.get_room_creation_handler() + self._directory_handler = hs.get_handlers().directory_handler self.captcha_client = CaptchaServerHttpClient(hs) self._next_generated_user_id = None @@ -513,6 +515,22 @@ class RegistrationHandler(BaseHandler): @defer.inlineCallbacks def _join_user_to_room(self, requester, room_identifier): + + # try to create the room if we're the first user on the server + if self.config.autocreate_auto_join_rooms: + count = yield self.store.count_all_users() + if count == 1 and RoomAlias.is_valid(room_identifier): + info = yield self._room_creation_handler.create_room( + requester, + config={ + "preset": "public_chat", + }, + ratelimit=False, + ) + room_id = info["room_id"] + + yield create_association(self, requester.user, room_identifier, room_id) + room_id = None room_member_handler = self.hs.get_room_member_handler() if RoomID.is_valid(room_identifier): -- cgit 1.4.1 From 19475cf3371819170737515c414e3cc7c2a32d43 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 28 Sep 2018 11:55:57 +0100 Subject: Remove redundant call to start_get_pdu_cache I think this got forgotten in #3932. We were getting away with it because it was the last call in this function. 
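
Condensing the patch above, the intended first-user flow is roughly the following sketch (not the final merged code; the follow-up commits in this series fix several thinkos in it):

    count = yield self.store.count_all_users()
    if count == 1 and RoomAlias.is_valid(room_identifier):
        room_creation_handler = self.hs.get_room_creation_handler()
        info = yield room_creation_handler.create_room(
            requester,
            config={"preset": "public_chat"},
            ratelimit=False,
        )
        room_id = info["room_id"]
        # later commits also create the alias association and emit the
        # m.room.aliases update event for the newly created room
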
--- changelog.d/3980.bugfix | 1 + synapse/app/homeserver.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/3980.bugfix diff --git a/changelog.d/3980.bugfix b/changelog.d/3980.bugfix new file mode 100644 index 0000000000..7578414ede --- /dev/null +++ b/changelog.d/3980.bugfix @@ -0,0 +1 @@ +Fix some instances of ExpiringCache not expiring cache items diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index a98fdbd210..e3f0d99a3f 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -386,7 +386,6 @@ def setup(config_options): hs.get_pusherpool().start() hs.get_datastore().start_profiling() hs.get_datastore().start_doing_background_updates() - hs.get_federation_client().start_get_pdu_cache() reactor.callWhenRunning(start) -- cgit 1.4.1 From 5304449738b0193012a613edf74437e4af2ff416 Mon Sep 17 00:00:00 2001 From: Jan Christian Grünhage Date: Fri, 28 Sep 2018 13:36:35 +0200 Subject: build python 3 docker images on circle CI (#3976) --- .circleci/config.yml | 16 +++++++++++----- changelog.d/3976.misc | 1 + 2 files changed, 12 insertions(+), 5 deletions(-) create mode 100644 changelog.d/3976.misc diff --git a/.circleci/config.yml b/.circleci/config.yml index 3cb14793fc..6ae3a42235 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -4,18 +4,24 @@ jobs: machine: true steps: - checkout - - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:$CIRCLE_TAG . + - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_TAG} . + - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 --build-arg PYTHON_VERSION=3.6 . - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD - - run: docker push matrixdotorg/synapse:$CIRCLE_TAG + - run: docker push matrixdotorg/synapse:${CIRCLE_TAG} + - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3 dockerhubuploadlatest: machine: true steps: - checkout - - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:$CIRCLE_SHA1 . + - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_SHA1} . + - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_SHA1}-py3 --build-arg PYTHON_VERSION=3.6 . 
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD - - run: docker tag matrixdotorg/synapse:$CIRCLE_SHA1 matrixdotorg/synapse:latest - - run: docker push matrixdotorg/synapse:$CIRCLE_SHA1 + - run: docker tag matrixdotorg/synapse:${CIRCLE_SHA1} matrixdotorg/synapse:latest + - run: docker tag matrixdotorg/synapse:${CIRCLE_SHA1}-py3 matrixdotorg/synapse:latest-py3 + - run: docker push matrixdotorg/synapse:${CIRCLE_SHA1} + - run: docker push matrixdotorg/synapse:${CIRCLE_SHA1}-py3 - run: docker push matrixdotorg/synapse:latest + - run: docker push matrixdotorg/synapse:latest-py3 sytestpy2: machine: true steps: diff --git a/changelog.d/3976.misc b/changelog.d/3976.misc new file mode 100644 index 0000000000..282148c986 --- /dev/null +++ b/changelog.d/3976.misc @@ -0,0 +1 @@ +Build py3 docker images for docker hub too -- cgit 1.4.1 From 965154d60af59b69eac01f7cfcf821a757ae93fa Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 28 Sep 2018 12:45:54 +0100 Subject: Fix complete fail to do the right thing --- synapse/federation/transaction_queue.py | 3 ++- synapse/handlers/typing.py | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index ae47aaae0b..98b5950800 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -354,7 +354,7 @@ class TransactionQueue(object): content=content, ) - if not destination == self.server_name: + if destination == self.server_name: logger.info("Not sending EDU to ourselves") return @@ -372,6 +372,7 @@ class TransactionQueue(object): def send_device_messages(self, destination): if destination == self.server_name: logger.info("Not sending device update to ourselves") + return self._attempt_new_transaction(destination) diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 2d2d3d5a0d..bf82b3f864 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -218,6 +218,7 @@ class TypingHandler(object): for domain in set(get_domain_from_id(u) for u in users): if domain != self.server_name: + logger.debug("sending typing update to %s", domain) self.federation.send_edu( destination=domain, edu_type="m.typing", -- cgit 1.4.1 From 9a8bbc9a59e4f5534225d0d07c86652242c88efe Mon Sep 17 00:00:00 2001 From: Bruno Windels Date: Fri, 28 Sep 2018 13:36:56 +0100 Subject: add --no-admin flag to registration script (#3836) --- changelog.d/3836.bugfix | 1 + scripts/register_new_matrix_user | 18 ++++++++++++++---- 2 files changed, 15 insertions(+), 4 deletions(-) create mode 100644 changelog.d/3836.bugfix diff --git a/changelog.d/3836.bugfix b/changelog.d/3836.bugfix new file mode 100644 index 0000000000..add49fbec0 --- /dev/null +++ b/changelog.d/3836.bugfix @@ -0,0 +1 @@ +support registering regular users non-interactively with register_new_matrix_user script \ No newline at end of file diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user index 8c3d429351..91bdb3a25b 100755 --- a/scripts/register_new_matrix_user +++ b/scripts/register_new_matrix_user @@ -133,7 +133,7 @@ def register_new_user(user, password, server_location, shared_secret, admin): print "Passwords do not match" sys.exit(1) - if not admin: + if admin is None: admin = raw_input("Make admin [no]: ") if admin in ("y", "yes", "true"): admin = True @@ -160,10 +160,16 @@ if __name__ == "__main__": default=None, help="New password for user. 
Will prompt if omitted.", ) - parser.add_argument( + admin_group = parser.add_mutually_exclusive_group() + admin_group.add_argument( "-a", "--admin", action="store_true", - help="Register new user as an admin. Will prompt if omitted.", + help="Register new user as an admin. Will prompt if --no-admin is not set either.", + ) + admin_group.add_argument( + "--no-admin", + action="store_true", + help="Register new user as a regular user. Will prompt if --admin is not set either.", ) group = parser.add_mutually_exclusive_group(required=True) @@ -197,4 +203,8 @@ if __name__ == "__main__": else: secret = args.shared_secret - register_new_user(args.user, args.password, args.server_url, secret, args.admin) + admin = None + if args.admin or args.no_admin: + admin = args.admin + + register_new_user(args.user, args.password, args.server_url, secret, admin) -- cgit 1.4.1 From 8f646f2d04e7d21aa0570826bd78a0bbc3bb706b Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Fri, 28 Sep 2018 15:37:28 +0100 Subject: fix UTs --- synapse/config/registration.py | 2 +- synapse/handlers/register.py | 13 +++++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 686c7fa9f7..dcf2374ed2 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -44,7 +44,7 @@ class RegistrationConfig(Config): ) self.auto_join_rooms = config.get("auto_join_rooms", []) - self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", true) + self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True) def default_config(self, **kwargs): registration_shared_secret = random_string_with_symbols(50) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 0e5337d26c..a358bfc723 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -50,8 +50,6 @@ class RegistrationHandler(BaseHandler): self._auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() - self._room_creation_handler = hs.get_room_creation_handler() - self._directory_handler = hs.get_handlers().directory_handler self.captcha_client = CaptchaServerHttpClient(hs) self._next_generated_user_id = None @@ -520,7 +518,8 @@ class RegistrationHandler(BaseHandler): if self.config.autocreate_auto_join_rooms: count = yield self.store.count_all_users() if count == 1 and RoomAlias.is_valid(room_identifier): - info = yield self._room_creation_handler.create_room( + room_creation_handler = hs.get_room_creation_handler() + info = yield room_creation_handler.create_room( requester, config={ "preset": "public_chat", @@ -529,7 +528,13 @@ class RegistrationHandler(BaseHandler): ) room_id = info["room_id"] - yield create_association(self, requester.user, room_identifier, room_id) + directory_handler = hs.get_handlers().directory_handler + yield directory_handler.create_association( + self, + requester.user, + room_identifier, + room_id + ) room_id = None room_member_handler = self.hs.get_room_member_handler() -- cgit 1.4.1 From 5b68f29f48acfa45e671af1fb325d0c0d532f3d6 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 29 Sep 2018 02:14:40 +0100 Subject: fix thinkos --- synapse/handlers/register.py | 12 ++++++------ synapse/storage/directory.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index a358bfc723..05e8f4ea73 100644 --- 
a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -515,10 +515,10 @@ class RegistrationHandler(BaseHandler): def _join_user_to_room(self, requester, room_identifier): # try to create the room if we're the first user on the server - if self.config.autocreate_auto_join_rooms: + if self.hs.config.autocreate_auto_join_rooms: count = yield self.store.count_all_users() if count == 1 and RoomAlias.is_valid(room_identifier): - room_creation_handler = hs.get_room_creation_handler() + room_creation_handler = self.hs.get_room_creation_handler() info = yield room_creation_handler.create_room( requester, config={ @@ -528,11 +528,11 @@ class RegistrationHandler(BaseHandler): ) room_id = info["room_id"] - directory_handler = hs.get_handlers().directory_handler + directory_handler = self.hs.get_handlers().directory_handler + room_alias = RoomAlias.from_string(room_identifier) yield directory_handler.create_association( - self, - requester.user, - room_identifier, + requester.user.to_string(), + room_alias, room_id ) diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py index cfb687cb53..61a029a53c 100644 --- a/synapse/storage/directory.py +++ b/synapse/storage/directory.py @@ -90,7 +90,7 @@ class DirectoryWorkerStore(SQLBaseStore): class DirectoryStore(DirectoryWorkerStore): @defer.inlineCallbacks def create_room_alias_association(self, room_alias, room_id, servers, creator=None): - """ Creates an associatin between a room alias and room_id/servers + """ Creates an association between a room alias and room_id/servers Args: room_alias (RoomAlias) -- cgit 1.4.1 From 23b6a0537f9e2b46a5857a7c7bd0a03d04d4095d Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 29 Sep 2018 02:19:37 +0100 Subject: emit room aliases event --- synapse/handlers/register.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 05e8f4ea73..7584064d5d 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -531,9 +531,14 @@ class RegistrationHandler(BaseHandler): directory_handler = self.hs.get_handlers().directory_handler room_alias = RoomAlias.from_string(room_identifier) yield directory_handler.create_association( - requester.user.to_string(), - room_alias, - room_id + user_id=requester.user.to_string(), + room_alias=room_alias, + room_id=room_id, + servers=[self.hs.hostname], + ) + + yield directory_handler.send_room_alias_update_event( + requester, requester.user.to_string(), room_id ) room_id = None -- cgit 1.4.1 From faa462ef79529f6b1802764038fd7365dfc37385 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 29 Sep 2018 02:21:01 +0100 Subject: changelog --- changelog.d/3975.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3975.feature diff --git a/changelog.d/3975.feature b/changelog.d/3975.feature new file mode 100644 index 0000000000..5cd8ad6cca --- /dev/null +++ b/changelog.d/3975.feature @@ -0,0 +1 @@ +First user should autocreate autojoin rooms -- cgit 1.4.1 From 8ea887856c1bcb97c90d88fe20f7f82ed7f48967 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 1 Oct 2018 11:59:52 +0100 Subject: Don't update eviction metrics on explicit removal --- synapse/util/caches/expiringcache.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 8ac56d85ff..f2f55ba6c9 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ 
-103,11 +103,6 @@ class ExpiringCache(object): if value is SENTINEL: return default - if self.iterable: - self.metrics.inc_evictions(len(value.value)) - else: - self.metrics.inc_evictions() - return value def __contains__(self, key): -- cgit 1.4.1 From 4f3e3ac192f2c5ff37198f50af99bc8fbd57beca Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 1 Oct 2018 12:25:27 +0100 Subject: Correctly match 'dict.pop' api --- synapse/util/caches/expiringcache.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index f2f55ba6c9..f369780277 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -98,10 +98,18 @@ class ExpiringCache(object): return entry.value - def pop(self, key, default=None): - value = self._cache.pop(key, SENTINEL) + def pop(self, key, default=SENTINEL): + """Removes and returns the value with the given key from the cache. + + If the key isn't in the cache then `default` will be returned if + specified, otherwise `KeyError` will get raised. + + Identical functionality to `dict.pop(..)`. + """ + + value = self._cache.pop(key, default) if value is SENTINEL: - return default + raise KeyError(key) return value -- cgit 1.4.1 From 53c5fa4e6c179c6ed855169dcb99b69699db75be Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 1 Oct 2018 12:29:17 +0100 Subject: Further reduce the size of the docker image (#3972) Rewrite the dockerfile as a multistage build: this means we can get rid of a whole load of cruft which we don't need. --- changelog.d/3972.misc | 1 + docker/Dockerfile | 71 +++++++++++++++++++++++++++++++++------------------ 2 files changed, 47 insertions(+), 25 deletions(-) create mode 100644 changelog.d/3972.misc diff --git a/changelog.d/3972.misc b/changelog.d/3972.misc new file mode 100644 index 0000000000..e56299ee78 --- /dev/null +++ b/changelog.d/3972.misc @@ -0,0 +1 @@ +Further reduce the docker image size diff --git a/docker/Dockerfile b/docker/Dockerfile index 1d00defc2d..db44c02a92 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,9 +1,13 @@ ARG PYTHON_VERSION=2 -FROM docker.io/python:${PYTHON_VERSION}-alpine3.8 -COPY . /synapse +### +### Stage 0: builder +### +FROM docker.io/python:${PYTHON_VERSION}-alpine3.8 as builder -RUN apk add --no-cache --virtual .build_deps \ +# install the OS build deps + +RUN apk add \ build-base \ libffi-dev \ libjpeg-turbo-dev \ @@ -11,30 +15,47 @@ RUN apk add --no-cache --virtual .build_deps \ libxslt-dev \ linux-headers \ postgresql-dev \ - zlib-dev \ - && cd /synapse \ - && apk add --no-cache --virtual .runtime_deps \ - libffi \ - libjpeg-turbo \ - libressl \ - libxslt \ - libpq \ - zlib \ - su-exec \ - && pip install --upgrade \ + zlib-dev + +# build things which have slow build steps, before we copy synapse, so that +# the layer can be cached. +# +# (we really just care about caching a wheel here, as the "pip install" below +# will install them again.) + +RUN pip install --prefix="/install" --no-warn-script-location \ + cryptography \ + msgpack-python \ + pillow \ + pynacl + +# now install synapse and all of the python deps to /install. + +COPY . /synapse +RUN pip install --prefix="/install" --no-warn-script-location \ lxml \ - pip \ psycopg2 \ - setuptools \ - && mkdir -p /synapse/cache \ - && pip install -f /synapse/cache --upgrade --process-dependency-links . 
\ - && mv /synapse/docker/start.py /synapse/docker/conf / \ - && rm -rf \ - setup.cfg \ - setup.py \ - synapse \ - && apk del .build_deps - + /synapse + +### +### Stage 1: runtime +### + +FROM docker.io/python:${PYTHON_VERSION}-alpine3.8 + +RUN apk add --no-cache --virtual .runtime_deps \ + libffi \ + libjpeg-turbo \ + libressl \ + libxslt \ + libpq \ + zlib \ + su-exec + +COPY --from=builder /install /usr/local +COPY ./docker/start.py /start.py +COPY ./docker/conf /conf + VOLUME ["/data"] EXPOSE 8008/tcp 8448/tcp -- cgit 1.4.1 From 82f922b4af8d41e15484e1913775d234c548d9f2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 1 Oct 2018 14:19:36 +0100 Subject: Fix lazy loaded sync with rejected state events In particular, we assume that the name and canonical alias events in the state have not been rejected. In practice this may not be the case (though we should probably think about fixing that) so lets ensure that we gracefully handle that case, rather than 404'ing the sync request like we do now. --- synapse/handlers/sync.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index c7d69d9d80..67b8ca28c7 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -567,13 +567,13 @@ class SyncHandler(object): # be a valid name or canonical_alias - i.e. we're checking that they # haven't been "deleted" by blatting {} over the top. if name_id: - name = yield self.store.get_event(name_id, allow_none=False) + name = yield self.store.get_event(name_id, allow_none=True) if name and name.content: defer.returnValue(summary) if canonical_alias_id: canonical_alias = yield self.store.get_event( - canonical_alias_id, allow_none=False, + canonical_alias_id, allow_none=True, ) if canonical_alias and canonical_alias.content: defer.returnValue(summary) -- cgit 1.4.1 From 5fa27eac7827acdd5409d46eab22fe682689d30b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 1 Oct 2018 14:24:41 +0100 Subject: Newsfile --- changelog.d/3986.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3986.bugfix diff --git a/changelog.d/3986.bugfix b/changelog.d/3986.bugfix new file mode 100644 index 0000000000..ce74345365 --- /dev/null +++ b/changelog.d/3986.bugfix @@ -0,0 +1 @@ +Fix lazy loaded sync in the presence of rejected state events -- cgit 1.4.1 From 6e05fd032c670080f9e9f99f2e2e8b8eccf24c7d Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 2 Oct 2018 00:11:58 +1000 Subject: Fix userconsent on Python 3 (#3938) --- changelog.d/3938.bugfix | 1 + synapse/api/urls.py | 2 +- tests/server_notices/test_consent.py | 100 +++++++++++++++++++++++++++++++++ tests/storage/test_client_ips.py | 48 +--------------- tests/unittest.py | 80 +++++++++++++++++++++++++- tests/utils.py | 105 +++++++++++++++++++---------------- 6 files changed, 240 insertions(+), 96 deletions(-) create mode 100644 changelog.d/3938.bugfix create mode 100644 tests/server_notices/test_consent.py diff --git a/changelog.d/3938.bugfix b/changelog.d/3938.bugfix new file mode 100644 index 0000000000..01ccca21a7 --- /dev/null +++ b/changelog.d/3938.bugfix @@ -0,0 +1 @@ +Sending server notices regarding user consent now works on Python 3. 
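
The root cause fixed in the urls.py hunk below is a Python 3 bytes/str mismatch in HMAC construction; a runnable sketch of the failure mode (key and user id invented):

    import hmac
    from hashlib import sha256

    user_id = "@user:example.com"

    # On Python 3, hmac.new(...) requires bytes for msg; passing the str
    # user_id raises TypeError, hence the .encode('ascii') in the fix.
    # (On Python 2, str is already bytes, so this only broke under py3.)
    mac = hmac.new(
        key=b"form_secret",
        msg=user_id.encode("ascii"),
        digestmod=sha256,
    ).hexdigest()
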
diff --git a/synapse/api/urls.py b/synapse/api/urls.py index 71347912f1..6d9f1ca0ef 100644 --- a/synapse/api/urls.py +++ b/synapse/api/urls.py @@ -64,7 +64,7 @@ class ConsentURIBuilder(object): """ mac = hmac.new( key=self._hmac_secret, - msg=user_id, + msg=user_id.encode('ascii'), digestmod=sha256, ).hexdigest() consent_uri = "%s_matrix/consent?%s" % ( diff --git a/tests/server_notices/test_consent.py b/tests/server_notices/test_consent.py new file mode 100644 index 0000000000..95badc985e --- /dev/null +++ b/tests/server_notices/test_consent.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.rest.client.v1 import admin, login, room +from synapse.rest.client.v2_alpha import sync + +from tests import unittest + + +class ConsentNoticesTests(unittest.HomeserverTestCase): + + servlets = [ + sync.register_servlets, + admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + + self.consent_notice_message = "consent %(consent_uri)s" + config = self.default_config() + config.user_consent_version = "1" + config.user_consent_server_notice_content = { + "msgtype": "m.text", + "body": self.consent_notice_message, + } + config.public_baseurl = "https://example.com/" + config.form_secret = "123abc" + + config.server_notices_mxid = "@notices:test" + config.server_notices_mxid_display_name = "test display name" + config.server_notices_mxid_avatar_url = None + config.server_notices_room_name = "Server Notices" + + hs = self.setup_test_homeserver(config=config) + + return hs + + def prepare(self, reactor, clock, hs): + self.user_id = self.register_user("bob", "abc123") + self.access_token = self.login("bob", "abc123") + + def test_get_sync_message(self): + """ + When user consent server notices are enabled, a sync will cause a notice + to fire (in a room which the user is invited to). The notice contains + the notice URL + an authentication code. 
+ """ + # Initial sync, to get the user consent room invite + request, channel = self.make_request( + "GET", "/_matrix/client/r0/sync", access_token=self.access_token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # Get the Room ID to join + room_id = list(channel.json_body["rooms"]["invite"].keys())[0] + + # Join the room + request, channel = self.make_request( + "POST", + "/_matrix/client/r0/rooms/" + room_id + "/join", + access_token=self.access_token, + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # Sync again, to get the message in the room + request, channel = self.make_request( + "GET", "/_matrix/client/r0/sync", access_token=self.access_token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # Get the message + room = channel.json_body["rooms"]["join"][room_id] + messages = [ + x for x in room["timeline"]["events"] if x["type"] == "m.room.message" + ] + + # One message, with the consent URL + self.assertEqual(len(messages), 1) + self.assertTrue( + messages[0]["content"]["body"].startswith( + "consent https://example.com/_matrix/consent" + ) + ) diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index 2ffbb9f14f..4577e9422b 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -14,10 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import hashlib -import hmac -import json - from mock import Mock from twisted.internet import defer @@ -145,34 +141,8 @@ class ClientIpAuthTestCase(unittest.HomeserverTestCase): return hs def prepare(self, hs, reactor, clock): - self.hs.config.registration_shared_secret = u"shared" self.store = self.hs.get_datastore() - - # Create the user - request, channel = self.make_request("GET", "/_matrix/client/r0/admin/register") - self.render(request) - nonce = channel.json_body["nonce"] - - want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) - want_mac.update(nonce.encode('ascii') + b"\x00bob\x00abc123\x00admin") - want_mac = want_mac.hexdigest() - - body = json.dumps( - { - "nonce": nonce, - "username": "bob", - "password": "abc123", - "admin": True, - "mac": want_mac, - } - ) - request, channel = self.make_request( - "POST", "/_matrix/client/r0/admin/register", body.encode('utf8') - ) - self.render(request) - - self.assertEqual(channel.code, 200) - self.user_id = channel.json_body["user_id"] + self.user_id = self.register_user("bob", "abc123", True) def test_request_with_xforwarded(self): """ @@ -194,20 +164,7 @@ class ClientIpAuthTestCase(unittest.HomeserverTestCase): def _runtest(self, headers, expected_ip, make_request_args): device_id = "bleb" - body = json.dumps( - { - "type": "m.login.password", - "user": "bob", - "password": "abc123", - "device_id": device_id, - } - ) - request, channel = self.make_request( - "POST", "/_matrix/client/r0/login", body.encode('utf8'), **make_request_args - ) - self.render(request) - self.assertEqual(channel.code, 200) - access_token = channel.json_body["access_token"].encode('ascii') + access_token = self.login("bob", "abc123", device_id=device_id) # Advance to a known time self.reactor.advance(123456 - self.reactor.seconds()) @@ -215,7 +172,6 @@ class ClientIpAuthTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "/_matrix/client/r0/admin/users/" + self.user_id, - body.encode('utf8'), access_token=access_token, **make_request_args ) diff --git a/tests/unittest.py b/tests/unittest.py index 
043710afaf..a59291cc60 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import hashlib +import hmac import logging from mock import Mock @@ -32,6 +34,7 @@ from synapse.types import UserID, create_requester from synapse.util.logcontext import LoggingContextFilter from tests.server import get_clock, make_request, render, setup_test_homeserver +from tests.utils import default_config # Set up putting Synapse's logs into Trial's. rootLogger = logging.getLogger() @@ -121,7 +124,7 @@ class TestCase(unittest.TestCase): try: self.assertEquals(attrs[key], getattr(obj, key)) except AssertionError as e: - raise (type(e))(e.message + " for '.%s'" % key) + raise (type(e))(str(e) + " for '.%s'" % key) def assert_dict(self, required, actual): """Does a partial assert of a dict. @@ -223,6 +226,15 @@ class HomeserverTestCase(TestCase): hs = self.setup_test_homeserver() return hs + def default_config(self, name="test"): + """ + Get a default HomeServer config object. + + Args: + name (str): The homeserver name/domain. + """ + return default_config(name) + def prepare(self, reactor, clock, homeserver): """ Prepare for the test. This involves things like mocking out parts of @@ -297,3 +309,69 @@ class HomeserverTestCase(TestCase): return d self.pump() return self.successResultOf(d) + + def register_user(self, username, password, admin=False): + """ + Register a user. Requires the Admin API to be registered. + + Args: + username (bytes/unicode): The user part of the new user. + password (bytes/unicode): The password of the new user. + admin (bool): Whether the user should be created as an admin + or not. + + Returns: + The MXID of the new user (unicode). + """ + self.hs.config.registration_shared_secret = u"shared" + + # Create the user + request, channel = self.make_request("GET", "/_matrix/client/r0/admin/register") + self.render(request) + nonce = channel.json_body["nonce"] + + want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) + nonce_str = b"\x00".join([username.encode('utf8'), password.encode('utf8')]) + if admin: + nonce_str += b"\x00admin" + else: + nonce_str += b"\x00notadmin" + want_mac.update(nonce.encode('ascii') + b"\x00" + nonce_str) + want_mac = want_mac.hexdigest() + + body = json.dumps( + { + "nonce": nonce, + "username": username, + "password": password, + "admin": admin, + "mac": want_mac, + } + ) + request, channel = self.make_request( + "POST", "/_matrix/client/r0/admin/register", body.encode('utf8') + ) + self.render(request) + self.assertEqual(channel.code, 200) + + user_id = channel.json_body["user_id"] + return user_id + + def login(self, username, password, device_id=None): + """ + Log in a user, and get an access token. Requires the Login API to be + registered. + + """ + body = {"type": "m.login.password", "user": username, "password": password} + if device_id: + body["device_id"] = device_id + + request, channel = self.make_request( + "POST", "/_matrix/client/r0/login", json.dumps(body).encode('utf8') + ) + self.render(request) + self.assertEqual(channel.code, 200) + + access_token = channel.json_body["access_token"].encode('ascii') + return access_token diff --git a/tests/utils.py b/tests/utils.py index aaed1149c3..1ef80e7b79 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -96,6 +96,62 @@ def setupdb(): atexit.register(_cleanup) +def default_config(name): + """ + Create a reasonable test config.
+ """ + config = Mock() + config.signing_key = [MockKey()] + config.event_cache_size = 1 + config.enable_registration = True + config.macaroon_secret_key = "not even a little secret" + config.expire_access_token = False + config.server_name = name + config.trusted_third_party_id_servers = [] + config.room_invite_state_types = [] + config.password_providers = [] + config.worker_replication_url = "" + config.worker_app = None + config.email_enable_notifs = False + config.block_non_admin_invites = False + config.federation_domain_whitelist = None + config.federation_rc_reject_limit = 10 + config.federation_rc_sleep_limit = 10 + config.federation_rc_sleep_delay = 100 + config.federation_rc_concurrent = 10 + config.filter_timeline_limit = 5000 + config.user_directory_search_all_users = False + config.user_consent_server_notice_content = None + config.block_events_without_consent_error = None + config.media_storage_providers = [] + config.auto_join_rooms = [] + config.limit_usage_by_mau = False + config.hs_disabled = False + config.hs_disabled_message = "" + config.hs_disabled_limit_type = "" + config.max_mau_value = 50 + config.mau_trial_days = 0 + config.mau_limits_reserved_threepids = [] + config.admin_contact = None + config.rc_messages_per_second = 10000 + config.rc_message_burst_count = 10000 + + # we need a sane default_room_version, otherwise attempts to create rooms will + # fail. + config.default_room_version = "1" + + # disable user directory updates, because they get done in the + # background, which upsets the test runner. + config.update_user_directory = False + + def is_threepid_reserved(threepid): + return ServerConfig.is_threepid_reserved(config, threepid) + + config.is_threepid_reserved.side_effect = is_threepid_reserved + + return config + + class TestHomeServer(HomeServer): DATASTORE_CLASS = DataStore @@ -124,54 +180,7 @@ def setup_test_homeserver( from twisted.internet import reactor if config is None: - config = Mock() - config.signing_key = [MockKey()] - config.event_cache_size = 1 - config.enable_registration = True - config.macaroon_secret_key = "not even a little secret" - config.expire_access_token = False - config.server_name = name - config.trusted_third_party_id_servers = [] - config.room_invite_state_types = [] - config.password_providers = [] - config.worker_replication_url = "" - config.worker_app = None - config.email_enable_notifs = False - config.block_non_admin_invites = False - config.federation_domain_whitelist = None - config.federation_rc_reject_limit = 10 - config.federation_rc_sleep_limit = 10 - config.federation_rc_sleep_delay = 100 - config.federation_rc_concurrent = 10 - config.filter_timeline_limit = 5000 - config.user_directory_search_all_users = False - config.user_consent_server_notice_content = None - config.block_events_without_consent_error = None - config.media_storage_providers = [] - config.auto_join_rooms = [] - config.limit_usage_by_mau = False - config.hs_disabled = False - config.hs_disabled_message = "" - config.hs_disabled_limit_type = "" - config.max_mau_value = 50 - config.mau_trial_days = 0 - config.mau_limits_reserved_threepids = [] - config.admin_contact = None - config.rc_messages_per_second = 10000 - config.rc_message_burst_count = 10000 - - # we need a sane default_room_version, otherwise attempts to create rooms will - # fail. - config.default_room_version = "1" - - # disable user directory updates, because they get done in the - # background, which upsets the test runner. 
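The `register_user` helper added to tests/unittest.py above is also a compact description of the shared-secret admin registration scheme: the MAC is an HMAC-SHA1, keyed with the server's registration shared secret, over the nonce, username, password and admin flag, NUL-separated. A standalone sketch of that computation (the secret, nonce and credentials here are illustrative, not part of the patch)::

    import hashlib
    import hmac

    def registration_mac(shared_secret, nonce, username, password, admin=False):
        # HMAC-SHA1 over nonce \x00 username \x00 password \x00 admin-flag,
        # mirroring the want_mac computation in register_user above.
        mac = hmac.new(key=shared_secret.encode('utf8'), digestmod=hashlib.sha1)
        mac.update(b"\x00".join([
            nonce.encode('ascii'),
            username.encode('utf8'),
            password.encode('utf8'),
            b"admin" if admin else b"notadmin",
        ]))
        return mac.hexdigest()

    # The nonce would come from GET /_matrix/client/r0/admin/register.
    print(registration_mac("shared", "abcdef", "bob", "abc123"))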
- config.update_user_directory = False - - def is_threepid_reserved(threepid): - return ServerConfig.is_threepid_reserved(config, threepid) - - config.is_threepid_reserved.side_effect = is_threepid_reserved + config = default_config(name) config.use_frozen_dicts = True config.ldap_enabled = False -- cgit 1.4.1 From abdc141c2bc872e7db6761f652551eb1bbf0859d Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 2 Oct 2018 01:08:38 +1000 Subject: Update instructions to point to pip install (#3985) --- .circleci/merge_base_branch.sh | 13 +++++--- README.rst | 70 ++++++++++-------------------------------- UPGRADE.rst | 6 ++-- changelog.d/3985.misc | 1 + 4 files changed, 29 insertions(+), 61 deletions(-) create mode 100644 changelog.d/3985.misc diff --git a/.circleci/merge_base_branch.sh b/.circleci/merge_base_branch.sh index 9614eb91b6..6b0bf3aa48 100755 --- a/.circleci/merge_base_branch.sh +++ b/.circleci/merge_base_branch.sh @@ -9,13 +9,16 @@ source $BASH_ENV if [[ -z "${CIRCLE_PR_NUMBER}" ]] then - echo "Can't figure out what the PR number is!" - exit 1 + echo "Can't figure out what the PR number is! Assuming merge target is develop." + + # It probably hasn't had a PR opened yet. Since all PRs land on develop, we + # can probably assume it's based on it and will be merged into it. + GITBASE="develop" +else + # Get the reference, using the GitHub API + GITBASE=`curl -q https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'` fi -# Get the reference, using the GitHub API -GITBASE=`curl -q https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'` - # Show what we are before git show -s diff --git a/README.rst b/README.rst index 5547f617ba..e1ea351f84 100644 --- a/README.rst +++ b/README.rst @@ -81,7 +81,7 @@ Thanks for using Matrix! Synapse Installation ==================== -Synapse is the reference python/twisted Matrix homeserver implementation. +Synapse is the reference Python/Twisted Matrix homeserver implementation. System requirements: @@ -91,12 +91,13 @@ System requirements: Installing from source ---------------------- + (Prebuilt packages are available for some platforms - see `Platform-Specific Instructions`_.) -Synapse is written in python but some of the libraries it uses are written in -C. So before we can install synapse itself we need a working C compiler and the -header files for python C extensions. +Synapse is written in Python but some of the libraries it uses are written in +C. So before we can install Synapse itself we need a working C compiler and the +header files for Python C extensions. Installing prerequisites on Ubuntu or Debian:: @@ -143,18 +144,24 @@ Installing prerequisites on OpenBSD:: doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \ libxslt -To install the synapse homeserver run:: +To install the Synapse homeserver run:: virtualenv -p python2.7 ~/.synapse source ~/.synapse/bin/activate pip install --upgrade pip pip install --upgrade setuptools - pip install https://github.com/matrix-org/synapse/tarball/master + pip install matrix-synapse -This installs synapse, along with the libraries it uses, into a virtual +This installs Synapse, along with the libraries it uses, into a virtual environment under ``~/.synapse``. Feel free to pick a different directory if you prefer. 
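A quick way to confirm what ended up in that virtualenv is to ask the package itself; the `matrix-synapse` distribution installs the `synapse` package, whose version string is the same one the release commits later in this series bump (a minimal check, run with the virtualenv activated)::

    # Prints the installed Synapse version, e.g. "0.33.6rc1".
    import synapse
    print(synapse.__version__)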
+This Synapse installation can then be upgraded later by using pip again with the +update flag:: + + source ~/.synapse/bin/activate + pip install -U matrix-synapse + In case of problems, please see the _`Troubleshooting` section below. There is an official synapse image available at @@ -167,7 +174,7 @@ Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a Dockerfile to automate a synapse server in a single Docker image, at https://hub.docker.com/r/avhost/docker-matrix/tags/ -Configuring synapse +Configuring Synapse ------------------- Before you can start Synapse, you will need to generate a configuration @@ -249,26 +256,6 @@ Setting up a TURN server For reliable VoIP calls to be routed via this homeserver, you MUST configure a TURN server. See ``_ for details. -IPv6 ---- - -As of Synapse 0.19 we finally support IPv6, many thanks to @kyrias and @glyph -for providing PR #1696. - -However, for federation to work on hosts with IPv6 DNS servers you **must** -be running Twisted 17.1.0 or later - see https://github.com/matrix-org/synapse/issues/1002 -for details. We can't make Synapse depend on Twisted 17.1 by default -yet as it will break most older distributions (see https://github.com/matrix-org/synapse/pull/1909) -so if you are using operating system dependencies you'll have to install your -own Twisted 17.1 package via pip or backports etc. - -If you're running in a virtualenv then pip should have installed the newest -Twisted automatically, but if your virtualenv is old you will need to manually -upgrade to a newer Twisted dependency via: - - pip install Twisted>=17.1.0 - - Running Synapse =============== @@ -444,8 +431,7 @@ settings require a slightly more difficult installation process. using the ``.`` command, rather than ``bash``'s ``source``. 5) Optionally, use ``pip`` to install ``lxml``, which Synapse needs to parse webpages for their titles. -6) Use ``pip`` to install this repository: ``pip install - https://github.com/matrix-org/synapse/tarball/master`` +6) Use ``pip`` to install this repository: ``pip install matrix-synapse`` 7) Optionally, change ``_synapse``'s shell to ``/bin/false`` to reduce the chance of a compromised Synapse server being used to take over your box. @@ -473,7 +459,7 @@ Troubleshooting Troubleshooting Installation ---------------------------- -Synapse requires pip 1.7 or later, so if your OS provides too old a version you +Synapse requires pip 8 or later, so if your OS provides too old a version you may need to manually upgrade it:: sudo pip install --upgrade pip @@ -508,28 +494,6 @@ failing, e.g.:: pip install twisted -On OS X, if you encounter clang: error: unknown argument: '-mno-fused-madd' you -will need to export CFLAGS=-Qunused-arguments. - -Troubleshooting Running ------------------------ - -If synapse fails with ``missing "sodium.h"`` crypto errors, you may need -to manually upgrade PyNaCL, as synapse uses NaCl (https://nacl.cr.yp.to/) for -encryption and digital signatures. -Unfortunately PyNACL currently has a few issues -(https://github.com/pyca/pynacl/issues/53) and -(https://github.com/pyca/pynacl/issues/79) that mean it may not install -correctly, causing all tests to fail with errors about missing "sodium.h".
To -fix try re-installing from PyPI or directly from -(https://github.com/pyca/pynacl):: - - # Install from PyPI - pip install --user --upgrade --force pynacl - - # Install from github - pip install --user https://github.com/pyca/pynacl/tarball/master - Running out of File Handles ~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/UPGRADE.rst b/UPGRADE.rst index f6bb1070b1..6cf3169f75 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -18,7 +18,7 @@ instructions that may be required are listed later in this document. .. code:: bash - pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master + pip install --upgrade --process-dependency-links matrix-synapse # restart synapse synctl restart @@ -48,11 +48,11 @@ returned by the Client-Server API: # configured on port 443. curl -kv https:///_matrix/client/versions 2>&1 | grep "Server:" -Upgrading to $NEXT_VERSION +Upgrading to v0.27.3 ==================== This release expands the anonymous usage stats sent if the opt-in -``report_stats`` configuration is set to ``true``. We now capture RSS memory +``report_stats`` configuration is set to ``true``. We now capture RSS memory and cpu use at a very coarse level. This requires administrators to install the optional ``psutil`` python module. diff --git a/changelog.d/3985.misc b/changelog.d/3985.misc new file mode 100644 index 0000000000..ba935caf3a --- /dev/null +++ b/changelog.d/3985.misc @@ -0,0 +1 @@ +Updated the installation instructions to point to the matrix-synapse package on PyPI. -- cgit 1.4.1 From 8174c6725b5271923930432d1927dd39cff3547c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 1 Oct 2018 18:48:51 +0100 Subject: Avoid reraise, to improve stacktraces --- changelog.d/3989.misc | 1 + synapse/handlers/federation.py | 20 ++++++++++---------- synapse/handlers/message.py | 25 +++++++++++++------------ 3 files changed, 24 insertions(+), 22 deletions(-) create mode 100644 changelog.d/3989.misc diff --git a/changelog.d/3989.misc b/changelog.d/3989.misc new file mode 100644 index 0000000000..26700d168f --- /dev/null +++ b/changelog.d/3989.misc @@ -0,0 +1 @@ +Improve stacktraces in certain exceptions in the logs diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index d05b63673f..45d955e6f5 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -18,7 +18,6 @@ import itertools import logging -import sys import six from six import iteritems, itervalues @@ -1602,6 +1601,9 @@ class FederationHandler(BaseHandler): auth_events=auth_events, ) + # reraise does not allow inlineCallbacks to preserve the stacktrace, so we + # hack around with a try/finally instead. + success = False try: if not event.internal_metadata.is_outlier() and not backfilled: yield self.action_generator.handle_push_actions_for_event( @@ -1612,15 +1614,13 @@ class FederationHandler(BaseHandler): [(event, context)], backfilled=backfilled, ) - except: # noqa: E722, as we reraise the exception this is fine. 
- tp, value, tb = sys.exc_info() - - logcontext.run_in_background( - self.store.remove_push_actions_from_staging, - event.event_id, - ) - - six.reraise(tp, value, tb) + success = True + finally: + if not success: + logcontext.run_in_background( + self.store.remove_push_actions_from_staging, + event.event_id, + ) defer.returnValue(context) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index e484061cc0..4954b23a0d 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -14,9 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -import sys -import six from six import iteritems, itervalues, string_types from canonicaljson import encode_canonical_json, json @@ -624,6 +622,9 @@ class EventCreationHandler(object): event, context ) + # reraise does not allow inlineCallbacks to preserve the stacktrace, so we + # hack around with a try/finally instead. + success = False try: # If we're a worker we need to hit out to the master. if self.config.worker_app: @@ -636,6 +637,7 @@ class EventCreationHandler(object): ratelimit=ratelimit, extra_users=extra_users, ) + success = True return yield self.persist_and_notify_client_event( @@ -645,17 +647,16 @@ class EventCreationHandler(object): ratelimit=ratelimit, extra_users=extra_users, ) - except: # noqa: E722, as we reraise the exception this is fine. - # Ensure that we actually remove the entries in the push actions - # staging area, if we calculated them. - tp, value, tb = sys.exc_info() - - run_in_background( - self.store.remove_push_actions_from_staging, - event.event_id, - ) - six.reraise(tp, value, tb) + success = True + finally: + if not success: + # Ensure that we actually remove the entries in the push actions + # staging area, if we calculated them. + run_in_background( + self.store.remove_push_actions_from_staging, + event.event_id, + ) @defer.inlineCallbacks def persist_and_notify_client_event( -- cgit 1.4.1 From 334e075dd85a21e859eb89948b606c2710b932ad Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 1 Oct 2018 18:48:51 +0100 Subject: Fix error when logging incomplete requests If a connection is lost before a request is read from Request, Twisted sets `method` (and `uri`) attributes to dummy values. These dummy values have incorrect types (i.e. they're not bytes), and so things like `__repr__` would raise an exception. To fix this we add a helper method to return the method with a consistent type. --- synapse/http/site.py | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/synapse/http/site.py b/synapse/http/site.py index 50be2de3bb..e508c0bd4f 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -75,14 +75,14 @@ class SynapseRequest(Request): return '<%s at 0x%x method=%r uri=%r clientproto=%r site=%r>' % ( self.__class__.__name__, id(self), - self.method.decode('ascii', errors='replace'), + self.get_method(), self.get_redacted_uri(), self.clientproto.decode('ascii', errors='replace'), self.site.site_tag, ) def get_request_id(self): - return "%s-%i" % (self.method.decode('ascii'), self.request_seq) + return "%s-%i" % (self.get_method(), self.request_seq) def get_redacted_uri(self): uri = self.uri @@ -90,6 +90,21 @@ class SynapseRequest(Request): uri = self.uri.decode('ascii') return redact_uri(uri) + def get_method(self): + """Gets the method associated with the request (or a placeholder if the + method has not yet been received).
+ + Note: This is necessary as the placeholder value in Twisted is str + rather than bytes, so we need to sanitise `self.method`. + + Returns: + str + """ + method = self.method + if isinstance(method, bytes): + method = self.method.decode('ascii') + return method + def get_user_agent(self): return self.requestHeaders.getRawHeaders(b"User-Agent", [None])[-1] @@ -119,7 +134,7 @@ class SynapseRequest(Request): # dispatching to the handler, so that the handler # can update the servlet name in the request # metrics - requests_counter.labels(self.method.decode('ascii'), + requests_counter.labels(self.get_method(), self.request_metrics.name).inc() @contextlib.contextmanager @@ -207,14 +222,14 @@ class SynapseRequest(Request): self.start_time = time.time() self.request_metrics = RequestMetrics() self.request_metrics.start( - self.start_time, name=servlet_name, method=self.method.decode('ascii'), + self.start_time, name=servlet_name, method=self.get_method(), ) self.site.access_logger.info( "%s - %s - Received request: %s %s", self.getClientIP(), self.site.site_tag, - self.method.decode('ascii'), + self.get_method(), self.get_redacted_uri() ) @@ -280,7 +295,7 @@ class SynapseRequest(Request): int(usage.db_txn_count), self.sentLength, code, - self.method.decode('ascii'), + self.get_method(), self.get_redacted_uri(), self.clientproto.decode('ascii', errors='replace'), user_agent, -- cgit 1.4.1 From eeb755d17b5a38a3fb9f4648c6c56962efd58500 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Oct 2018 12:01:51 +0100 Subject: Newsfile --- changelog.d/3990.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3990.bugfix diff --git a/changelog.d/3990.bugfix b/changelog.d/3990.bugfix new file mode 100644 index 0000000000..51ea105a64 --- /dev/null +++ b/changelog.d/3990.bugfix @@ -0,0 +1 @@ +Fix error when logging incomplete HTTP requests -- cgit 1.4.1 From 7232917f12460efa2e3579331b6a97f046afe90b Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 2 Oct 2018 22:53:47 +1000 Subject: Disable frozen dicts by default (#3987) --- changelog.d/3987.misc | 1 + synapse/events/__init__.py | 13 ++++++++++--- tests/replication/slave/storage/test_events.py | 6 +++++- tests/utils.py | 3 ++- 4 files changed, 18 insertions(+), 5 deletions(-) create mode 100644 changelog.d/3987.misc diff --git a/changelog.d/3987.misc b/changelog.d/3987.misc new file mode 100644 index 0000000000..d6b5016211 --- /dev/null +++ b/changelog.d/3987.misc @@ -0,0 +1 @@ +Disable USE_FROZEN_DICTS for unittests by default. diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index b782af6308..12f1eb0a3e 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -13,15 +13,22 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os +from distutils.util import strtobool + import six from synapse.util.caches import intern_dict from synapse.util.frozenutils import freeze # Whether we should use frozen_dict in FrozenEvent. Using frozen_dicts prevents -# bugs where we accidentally share e.g. signature dicts. However, converting -# a dict to frozen_dicts is expensive. -USE_FROZEN_DICTS = True +# bugs where we accidentally share e.g. signature dicts. However, converting a +# dict to frozen_dicts is expensive. +# +# NOTE: This is overridden by the configuration in the Synapse worker apps, but +# for the sake of tests, it is set here while it cannot be configured on the +# homeserver object itself.
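The assignment in the next hunk gates this on an environment variable through `distutils.util.strtobool`, which is stricter than a plain truthiness check: it only accepts a fixed set of strings and raises on anything else. A small illustration of the semantics being relied on (the variable handling mirrors the line that follows)::

    import os
    from distutils.util import strtobool

    # y/yes/t/true/on/1 map to 1; n/no/f/false/off/0 map to 0 (case-insensitive).
    assert strtobool("1") == 1 and strtobool("Yes") == 1
    assert strtobool("0") == 0 and strtobool("off") == 0

    # With a default of "0", an unset variable cleanly disables the feature:
    enabled = strtobool(os.environ.get("SYNAPSE_USE_FROZEN_DICTS", "0"))

    try:
        strtobool("maybe")
    except ValueError:
        pass  # unrecognised strings are an error, not silently false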
+USE_FROZEN_DICTS = strtobool(os.environ.get("SYNAPSE_USE_FROZEN_DICTS", "0")) class _EventInternalMetadata(object): diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index db44d33c68..41be5d5a1a 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from canonicaljson import encode_canonical_json + from synapse.events import FrozenEvent, _EventInternalMetadata from synapse.events.snapshot import EventContext from synapse.replication.slave.storage.events import SlavedEventStore @@ -26,7 +28,9 @@ ROOM_ID = "!room:blue" def dict_equals(self, other): - return self.__dict__ == other.__dict__ + me = encode_canonical_json(self._event_dict) + them = encode_canonical_json(other._event_dict) + return me == them def patch__eq__(cls): diff --git a/tests/utils.py b/tests/utils.py index 1ef80e7b79..dd347a0c59 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -136,6 +136,8 @@ def default_config(name): config.rc_messages_per_second = 10000 config.rc_message_burst_count = 10000 + config.use_frozen_dicts = False + # we need a sane default_room_version, otherwise attempts to create rooms will # fail. config.default_room_version = "1" @@ -182,7 +184,6 @@ def setup_test_homeserver( if config is None: config = default_config(name) - config.use_frozen_dicts = True config.ldap_enabled = False if "clock" not in kargs: -- cgit 1.4.1 From 7258d081a5523040dddf5a99ff745acf4fe238bf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Oct 2018 15:47:57 +0100 Subject: Fix bug when invalidating destination retry timings --- synapse/storage/transactions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index ab54977a75..a3032cdce9 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -216,7 +216,7 @@ class TransactionStore(SQLBaseStore): retry_interval (int) - how long until next retry in ms """ - self._destination_retry_cache.pop(destination) + self._destination_retry_cache.pop(destination, None) return self.runInteraction( "set_destination_retry_timings", self._set_destination_retry_timings, -- cgit 1.4.1 From 8f614f842d3b3e1de868c3e28b9932ed21f48f77 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Oct 2018 15:49:39 +0100 Subject: Newsfile --- changelog.d/3991.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3991.bugfix diff --git a/changelog.d/3991.bugfix b/changelog.d/3991.bugfix new file mode 100644 index 0000000000..6545871f55 --- /dev/null +++ b/changelog.d/3991.bugfix @@ -0,0 +1 @@ +Add a cache to get_destination_retry_timings -- cgit 1.4.1 From 058a4c665e55a8654d253bf9cafaf814b465fbd9 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 3 Oct 2018 00:59:11 +1000 Subject: Remove Jenkins & other old dev junk (#3988) --- MANIFEST.in | 7 +++--- MAP.rst | 35 ----------------------------- changelog.d/3988.misc | 1 + jenkins-dendron-haproxy-postgres.sh | 23 ------------------- jenkins-dendron-postgres.sh | 20 ----------------- jenkins-flake8.sh | 22 ------------------- jenkins-postgres.sh | 18 --------------- jenkins-sqlite.sh | 16 -------------- jenkins-unittests.sh | 30 ------------------------- jenkins/clone.sh | 44 ------------------------------------- scripts-dev/copyrighter-sql.pl | 33 ---------------------------- 
scripts-dev/copyrighter.pl | 33 ---------------------------- 12 files changed, 4 insertions(+), 278 deletions(-) delete mode 100644 MAP.rst create mode 100644 changelog.d/3988.misc delete mode 100755 jenkins-dendron-haproxy-postgres.sh delete mode 100755 jenkins-dendron-postgres.sh delete mode 100755 jenkins-flake8.sh delete mode 100755 jenkins-postgres.sh delete mode 100755 jenkins-sqlite.sh delete mode 100755 jenkins-unittests.sh delete mode 100755 jenkins/clone.sh delete mode 100755 scripts-dev/copyrighter-sql.pl delete mode 100755 scripts-dev/copyrighter.pl diff --git a/MANIFEST.in b/MANIFEST.in index 47ae5a77b9..c6a37ac685 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -23,13 +23,9 @@ recursive-include synapse/static *.gif recursive-include synapse/static *.html recursive-include synapse/static *.js -exclude jenkins.sh -exclude jenkins*.sh -exclude jenkins* exclude Dockerfile exclude .dockerignore exclude test_postgresql.sh -recursive-exclude jenkins *.sh include pyproject.toml recursive-include changelog.d * @@ -38,3 +34,6 @@ prune .github prune demo/etc prune docker prune .circleci + +exclude jenkins* +recursive-exclude jenkins *.sh diff --git a/MAP.rst b/MAP.rst deleted file mode 100644 index 0f8e9818a8..0000000000 --- a/MAP.rst +++ /dev/null @@ -1,35 +0,0 @@ -Directory Structure -=================== - -Warning: this may be a bit stale... - -:: - - . - ├── cmdclient Basic CLI python Matrix client - ├── demo Scripts for running standalone Matrix demos - ├── docs All doc, including the draft Matrix API spec - │   ├── client-server The client-server Matrix API spec - │   ├── model Domain-specific elements of the Matrix API spec - │   ├── server-server The server-server model of the Matrix API spec - │   └── sphinx The internal API doc of the Synapse homeserver - ├── experiments Early experiments of using Synapse's internal APIs - ├── graph Visualisation of Matrix's distributed message store - ├── synapse The reference Matrix homeserver implementation - │   ├── api Common building blocks for the APIs - │   │   ├── events Definition of state representation Events - │   │   └── streams Definition of streamable Event objects - │   ├── app The __main__ entry point for the homeserver - │   ├── crypto The PKI client/server used for secure federation - │   │   └── resource PKI helper objects (e.g. keys) - │   ├── federation Server-server state replication logic - │   ├── handlers The main business logic of the homeserver - │   ├── http Wrappers around Twisted's HTTP server & client - │   ├── rest Servlet-style RESTful API - │   ├── storage Persistence subsystem (currently only sqlite3) - │   │   └── schema sqlite persistence schema - │   └── util Synapse-specific utilities - ├── tests Unit tests for the Synapse homeserver - └── webclient Basic AngularJS Matrix web client - - diff --git a/changelog.d/3988.misc b/changelog.d/3988.misc new file mode 100644 index 0000000000..10379d183b --- /dev/null +++ b/changelog.d/3988.misc @@ -0,0 +1 @@ +Remove unused Jenkins and development related files from the repo. 
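Returning to the "Avoid reraise" patch earlier in this series: the success-flag idiom it introduces is worth seeing in isolation. Re-raising from an `except` block (via `six.reraise` on Python 2) loses the stack trace that `@defer.inlineCallbacks` would otherwise report, whereas a `try`/`finally` lets the exception propagate untouched. A minimal sketch of the pattern (the operation and cleanup functions here are stand-ins, not Synapse code)::

    def risky_operation():
        raise RuntimeError("boom")

    def cleanup():
        print("removing staged state")  # stand-in for the real cleanup

    def do_work():
        # Record whether the try block ran to completion and clean up in
        # `finally` only on failure, so the exception -- with its original
        # traceback -- propagates unmodified.
        success = False
        try:
            risky_operation()
            success = True
        finally:
            if not success:
                cleanup()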
diff --git a/jenkins-dendron-haproxy-postgres.sh b/jenkins-dendron-haproxy-postgres.sh deleted file mode 100755 index 07979bf8b8..0000000000 --- a/jenkins-dendron-haproxy-postgres.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -set -eux - -: ${WORKSPACE:="$(pwd)"} - -export WORKSPACE -export PYTHONDONTWRITEBYTECODE=yep -export SYNAPSE_CACHE_FACTOR=1 - -export HAPROXY_BIN=/home/haproxy/haproxy-1.6.11/haproxy - -./jenkins/prepare_synapse.sh -./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git -./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git -./dendron/jenkins/build_dendron.sh -./sytest/jenkins/prep_sytest_for_postgres.sh - -./sytest/jenkins/install_and_run.sh \ - --python $WORKSPACE/.tox/py27/bin/python \ - --synapse-directory $WORKSPACE \ - --dendron $WORKSPACE/dendron/bin/dendron \ - --haproxy \ diff --git a/jenkins-dendron-postgres.sh b/jenkins-dendron-postgres.sh deleted file mode 100755 index 3b932fe340..0000000000 --- a/jenkins-dendron-postgres.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -set -eux - -: ${WORKSPACE:="$(pwd)"} - -export WORKSPACE -export PYTHONDONTWRITEBYTECODE=yep -export SYNAPSE_CACHE_FACTOR=1 - -./jenkins/prepare_synapse.sh -./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git -./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git -./dendron/jenkins/build_dendron.sh -./sytest/jenkins/prep_sytest_for_postgres.sh - -./sytest/jenkins/install_and_run.sh \ - --python $WORKSPACE/.tox/py27/bin/python \ - --synapse-directory $WORKSPACE \ - --dendron $WORKSPACE/dendron/bin/dendron \ diff --git a/jenkins-flake8.sh b/jenkins-flake8.sh deleted file mode 100755 index 11f1cab6c8..0000000000 --- a/jenkins-flake8.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -set -eux - -: ${WORKSPACE:="$(pwd)"} - -export PYTHONDONTWRITEBYTECODE=yep -export SYNAPSE_CACHE_FACTOR=1 - -# Output test results as junit xml -export TRIAL_FLAGS="--reporter=subunit" -export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml" -# Write coverage reports to a separate file for each process -export COVERAGE_OPTS="-p" -export DUMP_COVERAGE_COMMAND="coverage help" - -# Output flake8 violations to violations.flake8.log -export PEP8SUFFIX="--output-file=violations.flake8.log" - -rm .coverage* || echo "No coverage files to remove" - -tox -e packaging -e pep8 diff --git a/jenkins-postgres.sh b/jenkins-postgres.sh deleted file mode 100755 index 1afb736394..0000000000 --- a/jenkins-postgres.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -set -eux - -: ${WORKSPACE:="$(pwd)"} - -export WORKSPACE -export PYTHONDONTWRITEBYTECODE=yep -export SYNAPSE_CACHE_FACTOR=1 - -./jenkins/prepare_synapse.sh -./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git - -./sytest/jenkins/prep_sytest_for_postgres.sh - -./sytest/jenkins/install_and_run.sh \ - --python $WORKSPACE/.tox/py27/bin/python \ - --synapse-directory $WORKSPACE \ diff --git a/jenkins-sqlite.sh b/jenkins-sqlite.sh deleted file mode 100755 index baf4713a01..0000000000 --- a/jenkins-sqlite.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -set -eux - -: ${WORKSPACE:="$(pwd)"} - -export WORKSPACE -export PYTHONDONTWRITEBYTECODE=yep -export SYNAPSE_CACHE_FACTOR=1 - -./jenkins/prepare_synapse.sh -./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git - -./sytest/jenkins/install_and_run.sh \ - --python $WORKSPACE/.tox/py27/bin/python \ - --synapse-directory $WORKSPACE \ diff --git a/jenkins-unittests.sh b/jenkins-unittests.sh deleted file mode 
100755 index 4c2f103e80..0000000000 --- a/jenkins-unittests.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -set -eux - -: ${WORKSPACE:="$(pwd)"} - -export PYTHONDONTWRITEBYTECODE=yep -export SYNAPSE_CACHE_FACTOR=1 - -# Output test results as junit xml -export TRIAL_FLAGS="--reporter=subunit" -export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml" -# Write coverage reports to a separate file for each process -export COVERAGE_OPTS="-p" -export DUMP_COVERAGE_COMMAND="coverage help" - -# Output flake8 violations to violations.flake8.log -# Don't exit with non-0 status code on Jenkins, -# so that the build steps continue and a later step can decided whether to -# UNSTABLE or FAILURE this build. -export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?" - -rm .coverage* || echo "No coverage files to remove" - -tox --notest -e py27 -TOX_BIN=$WORKSPACE/.tox/py27/bin -python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install -$TOX_BIN/pip install lxml - -tox -e py27 diff --git a/jenkins/clone.sh b/jenkins/clone.sh deleted file mode 100755 index ab30ac7782..0000000000 --- a/jenkins/clone.sh +++ /dev/null @@ -1,44 +0,0 @@ -#! /bin/bash - -# This clones a project from github into a named subdirectory -# If the project has a branch with the same name as this branch -# then it will checkout that branch after cloning. -# Otherwise it will checkout "origin/develop." -# The first argument is the name of the directory to checkout -# the branch into. -# The second argument is the URL of the remote repository to checkout. -# Usually something like https://github.com/matrix-org/sytest.git - -set -eux - -NAME=$1 -PROJECT=$2 -BASE=".$NAME-base" - -# Update our mirror. -if [ ! -d ".$NAME-base" ]; then - # Create a local mirror of the source repository. - # This saves us from having to download the entire repository - # when this script is next run. - git clone "$PROJECT" "$BASE" --mirror -else - # Fetch any updates from the source repository. - (cd "$BASE"; git fetch -p) -fi - -# Remove the existing repository so that we have a clean copy -rm -rf "$NAME" -# Cloning with --shared means that we will share portions of the -# .git directory with our local mirror. -git clone "$BASE" "$NAME" --shared - -# Jenkins may have supplied us with the name of the branch in the -# environment. Otherwise we will have to guess based on the current -# commit. -: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"} -cd "$NAME" -# check out the relevant branch -git checkout "${GIT_BRANCH}" || ( - echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" - git checkout "origin/develop" -) diff --git a/scripts-dev/copyrighter-sql.pl b/scripts-dev/copyrighter-sql.pl deleted file mode 100755 index 13e630fc11..0000000000 --- a/scripts-dev/copyrighter-sql.pl +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/perl -pi -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -$copyright = <<EOF [...] -- cgit 1.4.1 From [...] Mon Sep 17 00:00:00 2001 From: [...] Date: Tue, 2 Oct 2018 16:22:39 +0100 Subject: Add tests --- tests/storage/test_transactions.py | 45 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 tests/storage/test_transactions.py diff --git a/tests/storage/test_transactions.py b/tests/storage/test_transactions.py new file mode 100644 index 0000000000..14169afa96 --- /dev/null +++ b/tests/storage/test_transactions.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from tests.unittest import HomeserverTestCase + + +class TransactionStoreTestCase(HomeserverTestCase): + def prepare(self, reactor, clock, homeserver): + self.store = homeserver.get_datastore() + + def test_get_set_transactions(self): + """Tests that we can successfully get a non-existent entry for + destination retries, as well as testing that we can set and get + correctly. + """ + d = self.store.get_destination_retry_timings("example.com") + r = self.get_success(d) + self.assertIsNone(r) + + d = self.store.set_destination_retry_timings("example.com", 50, 100) + self.get_success(d) + + d = self.store.get_destination_retry_timings("example.com") + r = self.get_success(d) + + self.assert_dict({"retry_last_ts": 50, "retry_interval": 100}, r) + + def test_initial_set_transactions(self): + """Tests that we can successfully set the destination retries (there + was a bug around invalidating the cache that broke this) + """ + d = self.store.set_destination_retry_timings("example.com", 50, 100) + self.get_success(d) -- cgit 1.4.1 From da8f82008d73407896c3dd902740af0d85d38d0a Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 3 Oct 2018 02:15:48 +1000 Subject: version --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index b1f7a89fba..312e4e2fa6 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.33.5.1" +__version__ = "0.33.6rc1" -- cgit 1.4.1 From ccce6f26a651005e9d56fadafebcb89231213f89 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 3 Oct 2018 02:16:21 +1000 Subject: changelog --- CHANGES.md | 75 ++++++++++++++++++++++++++++++++++++++++++++++++ changelog.d/3578.bugfix | 1 - changelog.d/3699.misc | 2 -- changelog.d/3794.misc | 1 - changelog.d/3836.bugfix | 1 - changelog.d/3868.bugfix | 1 - changelog.d/3873.misc | 2 -- changelog.d/3879.bugfix | 1 - changelog.d/3883.feature | 1 - changelog.d/3889.bugfix | 1 - changelog.d/3892.bugfix | 1 - changelog.d/3894.feature | 1 - changelog.d/3895.bugfix | 1 - changelog.d/3897.misc | 1 - changelog.d/3899.bugfix | 1 - changelog.d/3903.misc | 1 - changelog.d/3904.misc | 1 - changelog.d/3906.misc | 1 - changelog.d/3907.bugfix | 1 - changelog.d/3908.bugfix | 1 - changelog.d/3909.misc | 1 - changelog.d/3910.bugfix | 1 - changelog.d/3911.misc | 1 - changelog.d/3912.misc | 1 - changelog.d/3914.bugfix | 1 - changelog.d/3916.feature | 1 - changelog.d/3924.misc |
1 - changelog.d/3925.misc | 1 - changelog.d/3927.misc | 1 - changelog.d/3932.bugfix | 1 - changelog.d/3933.misc | 1 - changelog.d/3936.bugfix | 1 - changelog.d/3938.bugfix | 1 - changelog.d/3946.misc | 1 - changelog.d/3947.misc | 1 - changelog.d/3948.misc | 1 - changelog.d/3952.misc | 1 - changelog.d/3956.bugfix | 1 - changelog.d/3957.misc | 1 - changelog.d/3958.misc | 1 - changelog.d/3959.feature | 1 - changelog.d/3960.bugfix | 1 - changelog.d/3961.bugfix | 1 - changelog.d/3963.misc | 1 - changelog.d/3964.feature | 1 - changelog.d/3965.misc | 1 - changelog.d/3966.misc | 1 - changelog.d/3967.misc | 1 - changelog.d/3968.bugfix | 1 - changelog.d/3970.bugfix | 1 - changelog.d/3972.misc | 1 - changelog.d/3976.misc | 1 - changelog.d/3980.bugfix | 1 - changelog.d/3985.misc | 1 - changelog.d/3986.bugfix | 1 - changelog.d/3987.misc | 1 - changelog.d/3988.misc | 1 - changelog.d/3989.misc | 1 - changelog.d/3990.bugfix | 1 - changelog.d/3991.bugfix | 1 - 60 files changed, 75 insertions(+), 61 deletions(-) delete mode 100644 changelog.d/3578.bugfix delete mode 100644 changelog.d/3699.misc delete mode 100644 changelog.d/3794.misc delete mode 100644 changelog.d/3836.bugfix delete mode 100644 changelog.d/3868.bugfix delete mode 100644 changelog.d/3873.misc delete mode 100644 changelog.d/3879.bugfix delete mode 100644 changelog.d/3883.feature delete mode 100644 changelog.d/3889.bugfix delete mode 100644 changelog.d/3892.bugfix delete mode 100644 changelog.d/3894.feature delete mode 100644 changelog.d/3895.bugfix delete mode 100644 changelog.d/3897.misc delete mode 100644 changelog.d/3899.bugfix delete mode 100644 changelog.d/3903.misc delete mode 100644 changelog.d/3904.misc delete mode 100644 changelog.d/3906.misc delete mode 100644 changelog.d/3907.bugfix delete mode 100644 changelog.d/3908.bugfix delete mode 100644 changelog.d/3909.misc delete mode 100644 changelog.d/3910.bugfix delete mode 100644 changelog.d/3911.misc delete mode 100644 changelog.d/3912.misc delete mode 100644 changelog.d/3914.bugfix delete mode 100644 changelog.d/3916.feature delete mode 100644 changelog.d/3924.misc delete mode 100644 changelog.d/3925.misc delete mode 100644 changelog.d/3927.misc delete mode 100644 changelog.d/3932.bugfix delete mode 100644 changelog.d/3933.misc delete mode 100644 changelog.d/3936.bugfix delete mode 100644 changelog.d/3938.bugfix delete mode 100644 changelog.d/3946.misc delete mode 100644 changelog.d/3947.misc delete mode 100644 changelog.d/3948.misc delete mode 100644 changelog.d/3952.misc delete mode 100644 changelog.d/3956.bugfix delete mode 100644 changelog.d/3957.misc delete mode 100644 changelog.d/3958.misc delete mode 100644 changelog.d/3959.feature delete mode 100644 changelog.d/3960.bugfix delete mode 100644 changelog.d/3961.bugfix delete mode 100644 changelog.d/3963.misc delete mode 100644 changelog.d/3964.feature delete mode 100644 changelog.d/3965.misc delete mode 100644 changelog.d/3966.misc delete mode 100644 changelog.d/3967.misc delete mode 100644 changelog.d/3968.bugfix delete mode 100644 changelog.d/3970.bugfix delete mode 100644 changelog.d/3972.misc delete mode 100644 changelog.d/3976.misc delete mode 100644 changelog.d/3980.bugfix delete mode 100644 changelog.d/3985.misc delete mode 100644 changelog.d/3986.bugfix delete mode 100644 changelog.d/3987.misc delete mode 100644 changelog.d/3988.misc delete mode 100644 changelog.d/3989.misc delete mode 100644 changelog.d/3990.bugfix delete mode 100644 changelog.d/3991.bugfix diff --git a/CHANGES.md b/CHANGES.md index 
45d3cdb131..e9b491559e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,78 @@ +Synapse 0.33.6rc1 (2018-10-03) +============================== + +Features +-------- + +- Adding the ability to change MAX_UPLOAD_SIZE for the docker container variables. ([\#3883](https://github.com/matrix-org/synapse/issues/3883)) +- Report "python_version" in the phone home stats ([\#3894](https://github.com/matrix-org/synapse/issues/3894)) +- Always LL ourselves if we're in a room ([\#3916](https://github.com/matrix-org/synapse/issues/3916)) +- Include eventid in log lines when processing incoming federation transactions ([\#3959](https://github.com/matrix-org/synapse/issues/3959)) +- Remove spurious check which made 'localhost' servers not work ([\#3964](https://github.com/matrix-org/synapse/issues/3964)) + + +Bugfixes +-------- + +- Fix problem when playing media from Chrome using direct URL (thanks @remjey!) ([\#3578](https://github.com/matrix-org/synapse/issues/3578)) +- support registering regular users non-interactively with register_new_matrix_user script ([\#3836](https://github.com/matrix-org/synapse/issues/3836)) +- Fix broken invite email links for self hosted riots ([\#3868](https://github.com/matrix-org/synapse/issues/3868)) +- Don't ratelimit autojoins ([\#3879](https://github.com/matrix-org/synapse/issues/3879)) +- Fix 500 error when deleting unknown room alias ([\#3889](https://github.com/matrix-org/synapse/issues/3889)) +- Fix some b'abcd' noise in logs and metrics ([\#3892](https://github.com/matrix-org/synapse/issues/3892), [\#3895](https://github.com/matrix-org/synapse/issues/3895)) +- When we join a room, always try the server we used for the alias lookup first, to avoid unresponsive and out-of-date servers. ([\#3899](https://github.com/matrix-org/synapse/issues/3899)) +- Fix incorrect server-name indication for outgoing federation requests ([\#3907](https://github.com/matrix-org/synapse/issues/3907)) +- Fix adding client IPs to the database failing on Python 3. ([\#3908](https://github.com/matrix-org/synapse/issues/3908)) +- Fix bug where things occaisonally were not being timed out correctly. ([\#3910](https://github.com/matrix-org/synapse/issues/3910)) +- Fix bug where outbound federation would stop talking to some servers when using workers ([\#3914](https://github.com/matrix-org/synapse/issues/3914)) +- Fix some instances of ExpiringCache not expiring cache items ([\#3932](https://github.com/matrix-org/synapse/issues/3932), [\#3980](https://github.com/matrix-org/synapse/issues/3980)) +- Fix out-of-bounds error when LLing yourself ([\#3936](https://github.com/matrix-org/synapse/issues/3936)) +- Sending server notices regarding user consent now works on Python 3. ([\#3938](https://github.com/matrix-org/synapse/issues/3938)) +- Fix exceptions from metrics handler ([\#3956](https://github.com/matrix-org/synapse/issues/3956)) +- Fix error message for events with m.room.create missing from auth_events ([\#3960](https://github.com/matrix-org/synapse/issues/3960)) +- Fix errors due to concurrent monthly_active_user upserts ([\#3961](https://github.com/matrix-org/synapse/issues/3961)) +- Fix exceptions when processing incoming events over federation ([\#3968](https://github.com/matrix-org/synapse/issues/3968)) +- Replaced all occurences of e.message with str(e). 
Contributed by Schnuffle ([\#3970](https://github.com/matrix-org/synapse/issues/3970)) +- Fix lazy loaded sync in the presence of rejected state events ([\#3986](https://github.com/matrix-org/synapse/issues/3986)) +- Fix error when logging incomplete HTTP requests ([\#3990](https://github.com/matrix-org/synapse/issues/3990)) +- Add a cache to get_destination_retry_timings ([\#3991](https://github.com/matrix-org/synapse/issues/3991)) + + +Internal Changes +---------------- + +- Unit tests can now be run under PostgreSQL in Docker using + ``test_postgresql.sh``. ([\#3699](https://github.com/matrix-org/synapse/issues/3699)) +- Speed up calculation of typing updates for replication ([\#3794](https://github.com/matrix-org/synapse/issues/3794)) +- Remove documentation regarding installation on Cygwin, the use of WSL is + recommended instead. ([\#3873](https://github.com/matrix-org/synapse/issues/3873)) +- Fix typo in README, synaspse -> synapse ([\#3897](https://github.com/matrix-org/synapse/issues/3897)) +- Increase the timeout when filling missing events in federation requests ([\#3903](https://github.com/matrix-org/synapse/issues/3903)) +- Improve the logging when handling a federation transaction ([\#3904](https://github.com/matrix-org/synapse/issues/3904), [\#3966](https://github.com/matrix-org/synapse/issues/3966)) +- Improve logging of outbound federation requests ([\#3906](https://github.com/matrix-org/synapse/issues/3906), [\#3909](https://github.com/matrix-org/synapse/issues/3909)) +- Fix the docker image building on python 3 ([\#3911](https://github.com/matrix-org/synapse/issues/3911)) +- Add a regression test for logging failed HTTP requests on Python 3. ([\#3912](https://github.com/matrix-org/synapse/issues/3912)) +- Comments and interface cleanup for on_receive_pdu ([\#3924](https://github.com/matrix-org/synapse/issues/3924)) +- Fix spurious exceptions when remote http client closes conncetion ([\#3925](https://github.com/matrix-org/synapse/issues/3925)) +- Log exceptions thrown by background tasks ([\#3927](https://github.com/matrix-org/synapse/issues/3927)) +- Add a cache to get_destination_retry_timings ([\#3933](https://github.com/matrix-org/synapse/issues/3933)) +- Automate pushes to docker hub ([\#3946](https://github.com/matrix-org/synapse/issues/3946)) +- Require attrs 16.0.0 or later ([\#3947](https://github.com/matrix-org/synapse/issues/3947)) +- Fix incompatibility with python3 on alpine ([\#3948](https://github.com/matrix-org/synapse/issues/3948)) +- Run the test suite on the oldest supported versions of our dependencies in CI. ([\#3952](https://github.com/matrix-org/synapse/issues/3952)) +- CircleCI now only runs merged jobs on PRs, and commit jobs on develop, master, and release branches. 
([\#3957](https://github.com/matrix-org/synapse/issues/3957)) +- Fix docstrings and add tests for state store methods ([\#3958](https://github.com/matrix-org/synapse/issues/3958)) +- fix docstring for FederationClient.get_state_for_room ([\#3963](https://github.com/matrix-org/synapse/issues/3963)) +- Run notify_app_services as a bg process ([\#3965](https://github.com/matrix-org/synapse/issues/3965)) +- Clarifications in FederationHandler ([\#3967](https://github.com/matrix-org/synapse/issues/3967)) +- Further reduce the docker image size ([\#3972](https://github.com/matrix-org/synapse/issues/3972)) +- Build py3 docker images for docker hub too ([\#3976](https://github.com/matrix-org/synapse/issues/3976)) +- Updated the installation instructions to point to the matrix-synapse package on PyPI. ([\#3985](https://github.com/matrix-org/synapse/issues/3985)) +- Disable USE_FROZEN_DICTS for unittests by default. ([\#3987](https://github.com/matrix-org/synapse/issues/3987)) +- Remove unused Jenkins and development related files from the repo. ([\#3988](https://github.com/matrix-org/synapse/issues/3988)) +- Improve stacktraces in certain exceptions in the logs ([\#3989](https://github.com/matrix-org/synapse/issues/3989)) + + Synapse 0.33.5.1 (2018-09-25) ============================= diff --git a/changelog.d/3578.bugfix b/changelog.d/3578.bugfix deleted file mode 100644 index 9c52b6fa7e..0000000000 --- a/changelog.d/3578.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix problem when playing media from Chrome using direct URL (thanks @remjey!) diff --git a/changelog.d/3699.misc b/changelog.d/3699.misc deleted file mode 100644 index 437efbd98f..0000000000 --- a/changelog.d/3699.misc +++ /dev/null @@ -1,2 +0,0 @@ -Unit tests can now be run under PostgreSQL in Docker using -``test_postgresql.sh``. diff --git a/changelog.d/3794.misc b/changelog.d/3794.misc deleted file mode 100644 index 6b98c9609b..0000000000 --- a/changelog.d/3794.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up calculation of typing updates for replication diff --git a/changelog.d/3836.bugfix b/changelog.d/3836.bugfix deleted file mode 100644 index add49fbec0..0000000000 --- a/changelog.d/3836.bugfix +++ /dev/null @@ -1 +0,0 @@ -support registering regular users non-interactively with register_new_matrix_user script \ No newline at end of file diff --git a/changelog.d/3868.bugfix b/changelog.d/3868.bugfix deleted file mode 100644 index 99da8389a5..0000000000 --- a/changelog.d/3868.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix broken invite email links for self hosted riots diff --git a/changelog.d/3873.misc b/changelog.d/3873.misc deleted file mode 100644 index 8104b5c085..0000000000 --- a/changelog.d/3873.misc +++ /dev/null @@ -1,2 +0,0 @@ -Remove documentation regarding installation on Cygwin, the use of WSL is -recommended instead. diff --git a/changelog.d/3879.bugfix b/changelog.d/3879.bugfix deleted file mode 100644 index 82cb2a8ebc..0000000000 --- a/changelog.d/3879.bugfix +++ /dev/null @@ -1 +0,0 @@ -Don't ratelimit autojoins diff --git a/changelog.d/3883.feature b/changelog.d/3883.feature deleted file mode 100644 index c11e5c5309..0000000000 --- a/changelog.d/3883.feature +++ /dev/null @@ -1 +0,0 @@ -Adding the ability to change MAX_UPLOAD_SIZE for the docker container variables. 
\ No newline at end of file diff --git a/changelog.d/3889.bugfix b/changelog.d/3889.bugfix deleted file mode 100644 index e976425987..0000000000 --- a/changelog.d/3889.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix 500 error when deleting unknown room alias diff --git a/changelog.d/3892.bugfix b/changelog.d/3892.bugfix deleted file mode 100644 index 8b30afab04..0000000000 --- a/changelog.d/3892.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix some b'abcd' noise in logs and metrics diff --git a/changelog.d/3894.feature b/changelog.d/3894.feature deleted file mode 100644 index 1ed0cccdb2..0000000000 --- a/changelog.d/3894.feature +++ /dev/null @@ -1 +0,0 @@ -Report "python_version" in the phone home stats diff --git a/changelog.d/3895.bugfix b/changelog.d/3895.bugfix deleted file mode 100644 index 8b30afab04..0000000000 --- a/changelog.d/3895.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix some b'abcd' noise in logs and metrics diff --git a/changelog.d/3897.misc b/changelog.d/3897.misc deleted file mode 100644 index 87e7ac796e..0000000000 --- a/changelog.d/3897.misc +++ /dev/null @@ -1 +0,0 @@ -Fix typo in README, synaspse -> synapse \ No newline at end of file diff --git a/changelog.d/3899.bugfix b/changelog.d/3899.bugfix deleted file mode 100644 index 5120e3a823..0000000000 --- a/changelog.d/3899.bugfix +++ /dev/null @@ -1 +0,0 @@ -When we join a room, always try the server we used for the alias lookup first, to avoid unresponsive and out-of-date servers. diff --git a/changelog.d/3903.misc b/changelog.d/3903.misc deleted file mode 100644 index 49b64bf333..0000000000 --- a/changelog.d/3903.misc +++ /dev/null @@ -1 +0,0 @@ -Increase the timeout when filling missing events in federation requests \ No newline at end of file diff --git a/changelog.d/3904.misc b/changelog.d/3904.misc deleted file mode 100644 index 1e3c8e1706..0000000000 --- a/changelog.d/3904.misc +++ /dev/null @@ -1 +0,0 @@ -Improve the logging when handling a federation transaction \ No newline at end of file diff --git a/changelog.d/3906.misc b/changelog.d/3906.misc deleted file mode 100644 index 11709186d3..0000000000 --- a/changelog.d/3906.misc +++ /dev/null @@ -1 +0,0 @@ -Improve logging of outbound federation requests \ No newline at end of file diff --git a/changelog.d/3907.bugfix b/changelog.d/3907.bugfix deleted file mode 100644 index 45e010c052..0000000000 --- a/changelog.d/3907.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix incorrect server-name indication for outgoing federation requests \ No newline at end of file diff --git a/changelog.d/3908.bugfix b/changelog.d/3908.bugfix deleted file mode 100644 index 518aee6c4d..0000000000 --- a/changelog.d/3908.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix adding client IPs to the database failing on Python 3. \ No newline at end of file diff --git a/changelog.d/3909.misc b/changelog.d/3909.misc deleted file mode 100644 index 11709186d3..0000000000 --- a/changelog.d/3909.misc +++ /dev/null @@ -1 +0,0 @@ -Improve logging of outbound federation requests \ No newline at end of file diff --git a/changelog.d/3910.bugfix b/changelog.d/3910.bugfix deleted file mode 100644 index 22ec2adc33..0000000000 --- a/changelog.d/3910.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where things occaisonally were not being timed out correctly. 
diff --git a/changelog.d/3911.misc b/changelog.d/3911.misc deleted file mode 100644 index e31311d520..0000000000 --- a/changelog.d/3911.misc +++ /dev/null @@ -1 +0,0 @@ -Fix the docker image building on python 3 diff --git a/changelog.d/3912.misc b/changelog.d/3912.misc deleted file mode 100644 index 87d73697ea..0000000000 --- a/changelog.d/3912.misc +++ /dev/null @@ -1 +0,0 @@ -Add a regression test for logging failed HTTP requests on Python 3. \ No newline at end of file diff --git a/changelog.d/3914.bugfix b/changelog.d/3914.bugfix deleted file mode 100644 index 27e6bad590..0000000000 --- a/changelog.d/3914.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where outbound federation would stop talking to some servers when using workers diff --git a/changelog.d/3916.feature b/changelog.d/3916.feature deleted file mode 100644 index 13282d992b..0000000000 --- a/changelog.d/3916.feature +++ /dev/null @@ -1 +0,0 @@ -Always LL ourselves if we're in a room diff --git a/changelog.d/3924.misc b/changelog.d/3924.misc deleted file mode 100644 index 8d010e0bd9..0000000000 --- a/changelog.d/3924.misc +++ /dev/null @@ -1 +0,0 @@ -Comments and interface cleanup for on_receive_pdu \ No newline at end of file diff --git a/changelog.d/3925.misc b/changelog.d/3925.misc deleted file mode 100644 index 3e41f78ff5..0000000000 --- a/changelog.d/3925.misc +++ /dev/null @@ -1 +0,0 @@ -Fix spurious exceptions when remote http client closes conncetion diff --git a/changelog.d/3927.misc b/changelog.d/3927.misc deleted file mode 100644 index 4bd8e25f67..0000000000 --- a/changelog.d/3927.misc +++ /dev/null @@ -1 +0,0 @@ -Log exceptions thrown by background tasks diff --git a/changelog.d/3932.bugfix b/changelog.d/3932.bugfix deleted file mode 100644 index 7578414ede..0000000000 --- a/changelog.d/3932.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix some instances of ExpiringCache not expiring cache items diff --git a/changelog.d/3933.misc b/changelog.d/3933.misc deleted file mode 100644 index 6545871f55..0000000000 --- a/changelog.d/3933.misc +++ /dev/null @@ -1 +0,0 @@ -Add a cache to get_destination_retry_timings diff --git a/changelog.d/3936.bugfix b/changelog.d/3936.bugfix deleted file mode 100644 index 49b02b9e27..0000000000 --- a/changelog.d/3936.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix out-of-bounds error when LLing yourself diff --git a/changelog.d/3938.bugfix b/changelog.d/3938.bugfix deleted file mode 100644 index 01ccca21a7..0000000000 --- a/changelog.d/3938.bugfix +++ /dev/null @@ -1 +0,0 @@ -Sending server notices regarding user consent now works on Python 3. diff --git a/changelog.d/3946.misc b/changelog.d/3946.misc deleted file mode 100644 index 803857a297..0000000000 --- a/changelog.d/3946.misc +++ /dev/null @@ -1 +0,0 @@ -Automate pushes to docker hub diff --git a/changelog.d/3947.misc b/changelog.d/3947.misc deleted file mode 100644 index 5a9a22bed9..0000000000 --- a/changelog.d/3947.misc +++ /dev/null @@ -1 +0,0 @@ -Require attrs 16.0.0 or later diff --git a/changelog.d/3948.misc b/changelog.d/3948.misc deleted file mode 100644 index a93edbd1c3..0000000000 --- a/changelog.d/3948.misc +++ /dev/null @@ -1 +0,0 @@ -Fix incompatibility with python3 on alpine \ No newline at end of file diff --git a/changelog.d/3952.misc b/changelog.d/3952.misc deleted file mode 100644 index 015e4a43e6..0000000000 --- a/changelog.d/3952.misc +++ /dev/null @@ -1 +0,0 @@ -Run the test suite on the oldest supported versions of our dependencies in CI. 
\ No newline at end of file diff --git a/changelog.d/3956.bugfix b/changelog.d/3956.bugfix deleted file mode 100644 index b0828c9fc6..0000000000 --- a/changelog.d/3956.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix exceptions from metrics handler \ No newline at end of file diff --git a/changelog.d/3957.misc b/changelog.d/3957.misc deleted file mode 100644 index 69d647f119..0000000000 --- a/changelog.d/3957.misc +++ /dev/null @@ -1 +0,0 @@ -CircleCI now only runs merged jobs on PRs, and commit jobs on develop, master, and release branches. diff --git a/changelog.d/3958.misc b/changelog.d/3958.misc deleted file mode 100644 index 5931d06dcf..0000000000 --- a/changelog.d/3958.misc +++ /dev/null @@ -1 +0,0 @@ -Fix docstrings and add tests for state store methods diff --git a/changelog.d/3959.feature b/changelog.d/3959.feature deleted file mode 100644 index b3a4f37a8d..0000000000 --- a/changelog.d/3959.feature +++ /dev/null @@ -1 +0,0 @@ -Include eventid in log lines when processing incoming federation transactions \ No newline at end of file diff --git a/changelog.d/3960.bugfix b/changelog.d/3960.bugfix deleted file mode 100644 index a01dc60df6..0000000000 --- a/changelog.d/3960.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix error message for events with m.room.create missing from auth_events \ No newline at end of file diff --git a/changelog.d/3961.bugfix b/changelog.d/3961.bugfix deleted file mode 100644 index e46b5834aa..0000000000 --- a/changelog.d/3961.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix errors due to concurrent monthly_active_user upserts diff --git a/changelog.d/3963.misc b/changelog.d/3963.misc deleted file mode 100644 index f1e0eaf18e..0000000000 --- a/changelog.d/3963.misc +++ /dev/null @@ -1 +0,0 @@ -fix docstring for FederationClient.get_state_for_room diff --git a/changelog.d/3964.feature b/changelog.d/3964.feature deleted file mode 100644 index 599222eb58..0000000000 --- a/changelog.d/3964.feature +++ /dev/null @@ -1 +0,0 @@ -Remove spurious check which made 'localhost' servers not work diff --git a/changelog.d/3965.misc b/changelog.d/3965.misc deleted file mode 100644 index e7e4a9c5a8..0000000000 --- a/changelog.d/3965.misc +++ /dev/null @@ -1 +0,0 @@ -Run notify_app_services as a bg process diff --git a/changelog.d/3966.misc b/changelog.d/3966.misc deleted file mode 100644 index 1e3c8e1706..0000000000 --- a/changelog.d/3966.misc +++ /dev/null @@ -1 +0,0 @@ -Improve the logging when handling a federation transaction \ No newline at end of file diff --git a/changelog.d/3967.misc b/changelog.d/3967.misc deleted file mode 100644 index dc808aec73..0000000000 --- a/changelog.d/3967.misc +++ /dev/null @@ -1 +0,0 @@ -Clarifications in FederationHandler diff --git a/changelog.d/3968.bugfix b/changelog.d/3968.bugfix deleted file mode 100644 index 18d43cd64e..0000000000 --- a/changelog.d/3968.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix exceptions when processing incoming events over federation \ No newline at end of file diff --git a/changelog.d/3970.bugfix b/changelog.d/3970.bugfix deleted file mode 100644 index 5625315497..0000000000 --- a/changelog.d/3970.bugfix +++ /dev/null @@ -1 +0,0 @@ -Replaced all occurences of e.message with str(e). 
Contributed by Schnuffle diff --git a/changelog.d/3972.misc b/changelog.d/3972.misc deleted file mode 100644 index e56299ee78..0000000000 --- a/changelog.d/3972.misc +++ /dev/null @@ -1 +0,0 @@ -Further reduce the docker image size diff --git a/changelog.d/3976.misc b/changelog.d/3976.misc deleted file mode 100644 index 282148c986..0000000000 --- a/changelog.d/3976.misc +++ /dev/null @@ -1 +0,0 @@ -Build py3 docker images for docker hub too diff --git a/changelog.d/3980.bugfix b/changelog.d/3980.bugfix deleted file mode 100644 index 7578414ede..0000000000 --- a/changelog.d/3980.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix some instances of ExpiringCache not expiring cache items diff --git a/changelog.d/3985.misc b/changelog.d/3985.misc deleted file mode 100644 index ba935caf3a..0000000000 --- a/changelog.d/3985.misc +++ /dev/null @@ -1 +0,0 @@ -Updated the installation instructions to point to the matrix-synapse package on PyPI. diff --git a/changelog.d/3986.bugfix b/changelog.d/3986.bugfix deleted file mode 100644 index ce74345365..0000000000 --- a/changelog.d/3986.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix lazy loaded sync in the presence of rejected state events diff --git a/changelog.d/3987.misc b/changelog.d/3987.misc deleted file mode 100644 index d6b5016211..0000000000 --- a/changelog.d/3987.misc +++ /dev/null @@ -1 +0,0 @@ -Disable USE_FROZEN_DICTS for unittests by default. diff --git a/changelog.d/3988.misc b/changelog.d/3988.misc deleted file mode 100644 index 10379d183b..0000000000 --- a/changelog.d/3988.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused Jenkins and development related files from the repo. diff --git a/changelog.d/3989.misc b/changelog.d/3989.misc deleted file mode 100644 index 26700d168f..0000000000 --- a/changelog.d/3989.misc +++ /dev/null @@ -1 +0,0 @@ -Improve stacktraces in certain exceptions in the logs diff --git a/changelog.d/3990.bugfix b/changelog.d/3990.bugfix deleted file mode 100644 index 51ea105a64..0000000000 --- a/changelog.d/3990.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix error when logging incomplete HTTP requests diff --git a/changelog.d/3991.bugfix b/changelog.d/3991.bugfix deleted file mode 100644 index 6545871f55..0000000000 --- a/changelog.d/3991.bugfix +++ /dev/null @@ -1 +0,0 @@ -Add a cache to get_destination_retry_timings -- cgit 1.4.1 From ed763aeba82e1b6fa9f9bb80401e1c7f451a758b Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 3 Oct 2018 02:27:41 +1000 Subject: some changelog fixes --- CHANGES.md | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index e9b491559e..03d7412874 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -35,17 +35,14 @@ Bugfixes - Replaced all occurences of e.message with str(e). Contributed by Schnuffle ([\#3970](https://github.com/matrix-org/synapse/issues/3970)) - Fix lazy loaded sync in the presence of rejected state events ([\#3986](https://github.com/matrix-org/synapse/issues/3986)) - Fix error when logging incomplete HTTP requests ([\#3990](https://github.com/matrix-org/synapse/issues/3990)) -- Add a cache to get_destination_retry_timings ([\#3991](https://github.com/matrix-org/synapse/issues/3991)) Internal Changes ---------------- -- Unit tests can now be run under PostgreSQL in Docker using - ``test_postgresql.sh``. ([\#3699](https://github.com/matrix-org/synapse/issues/3699)) +- Unit tests can now be run under PostgreSQL in Docker using ``test_postgresql.sh``. 
([\#3699](https://github.com/matrix-org/synapse/issues/3699)) - Speed up calculation of typing updates for replication ([\#3794](https://github.com/matrix-org/synapse/issues/3794)) -- Remove documentation regarding installation on Cygwin, the use of WSL is - recommended instead. ([\#3873](https://github.com/matrix-org/synapse/issues/3873)) +- Remove documentation regarding installation on Cygwin, the use of WSL is recommended instead. ([\#3873](https://github.com/matrix-org/synapse/issues/3873)) - Fix typo in README, synaspse -> synapse ([\#3897](https://github.com/matrix-org/synapse/issues/3897)) - Increase the timeout when filling missing events in federation requests ([\#3903](https://github.com/matrix-org/synapse/issues/3903)) - Improve the logging when handling a federation transaction ([\#3904](https://github.com/matrix-org/synapse/issues/3904), [\#3966](https://github.com/matrix-org/synapse/issues/3966)) @@ -55,7 +52,7 @@ Internal Changes - Comments and interface cleanup for on_receive_pdu ([\#3924](https://github.com/matrix-org/synapse/issues/3924)) - Fix spurious exceptions when remote http client closes conncetion ([\#3925](https://github.com/matrix-org/synapse/issues/3925)) - Log exceptions thrown by background tasks ([\#3927](https://github.com/matrix-org/synapse/issues/3927)) -- Add a cache to get_destination_retry_timings ([\#3933](https://github.com/matrix-org/synapse/issues/3933)) +- Add a cache to get_destination_retry_timings ([\#3933](https://github.com/matrix-org/synapse/issues/3933), [\#3991](https://github.com/matrix-org/synapse/issues/3991)) - Automate pushes to docker hub ([\#3946](https://github.com/matrix-org/synapse/issues/3946)) - Require attrs 16.0.0 or later ([\#3947](https://github.com/matrix-org/synapse/issues/3947)) - Fix incompatibility with python3 on alpine ([\#3948](https://github.com/matrix-org/synapse/issues/3948)) @@ -79,8 +76,7 @@ Synapse 0.33.5.1 (2018-09-25) Internal Changes ---------------- -- Fix incompatibility with older Twisted version in tests. Thanks - @OlegGirko! ([\#3940](https://github.com/matrix-org/synapse/issues/3940)) +- Fix incompatibility with older Twisted version in tests. Thanks @OlegGirko! ([\#3940](https://github.com/matrix-org/synapse/issues/3940)) Synapse 0.33.5 (2018-09-24) -- cgit 1.4.1 From ae61ade8919085ad0c90e0b54c4e8338998ee64a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 2 Oct 2018 23:33:29 +0100 Subject: Fix bug in forward_extremity update logic An event does not stop being a forward_extremity just because an outlier or rejected event refers to it. 
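The reworked logic in the patch below reduces to set algebra over event ids (the A+B-C-D that its debug logging refers to). As a standalone sketch of that calculation, with the event interface simplified to an illustrative `prev_event_ids` attribute (the real code iterates `(event_id, hash)` pairs from `event.prev_events`):

```python
def calculate_new_extremities(latest_event_ids, new_events, existing_prevs):
    # A: the room's current forward extremities
    result = set(latest_event_ids)

    # B: add the events being persisted (outliers and rejected events are
    # assumed to have been filtered out already, as the patch does)
    result.update(ev.event_id for ev in new_events)

    # C: remove anything which is a prev_event of one of the new events
    result.difference_update(
        prev_id for ev in new_events for prev_id in ev.prev_event_ids
    )

    # D: remove anything which is a prev_event of an existing,
    # non-outlier, non-rejected event
    result.difference_update(existing_prevs)

    return result
```

The patch additionally asserts that the result is non-empty: outside the initial persistence of send_join results, a room should never be left without a forward extremity.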
--- changelog.d/3995.bug | 1 + synapse/storage/events.py | 111 ++++++++++++++++++++++++++++++++-------------- 2 files changed, 79 insertions(+), 33 deletions(-) create mode 100644 changelog.d/3995.bug diff --git a/changelog.d/3995.bug b/changelog.d/3995.bug new file mode 100644 index 0000000000..2adc36756b --- /dev/null +++ b/changelog.d/3995.bug @@ -0,0 +1 @@ +Fix bug in event persistence logic which caused 'NoneType is not iterable' \ No newline at end of file diff --git a/synapse/storage/events.py b/synapse/storage/events.py index e7487311ce..046174edb3 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -38,6 +38,7 @@ from synapse.storage.background_updates import BackgroundUpdateStore from synapse.storage.event_federation import EventFederationStore from synapse.storage.events_worker import EventsWorkerStore from synapse.types import RoomStreamToken, get_domain_from_id +from synapse.util import batch_iter from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.descriptors import cached, cachedInlineCallbacks from synapse.util.frozenutils import frozendict_json_encoder @@ -386,12 +387,10 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore ) for room_id, ev_ctx_rm in iteritems(events_by_room): - # Work out new extremities by recursively adding and removing - # the new events. latest_event_ids = yield self.get_latest_event_ids_in_room( room_id ) - new_latest_event_ids = yield self._calculate_new_extremeties( + new_latest_event_ids = yield self._calculate_new_extremities( room_id, ev_ctx_rm, latest_event_ids ) @@ -400,6 +399,12 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore # No change in extremities, so no change in state continue + # there should always be at least one forward extremity. + # (except during the initial persistence of the send_join + # results, in which case there will be no existing + # extremities, so we'll `continue` above and skip this bit.) + assert new_latest_event_ids, "No forward extremities left!" + new_forward_extremeties[room_id] = new_latest_event_ids len_1 = ( @@ -517,44 +522,88 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore ) @defer.inlineCallbacks - def _calculate_new_extremeties(self, room_id, event_contexts, latest_event_ids): - """Calculates the new forward extremeties for a room given events to + def _calculate_new_extremities(self, room_id, event_contexts, latest_event_ids): + """Calculates the new forward extremities for a room given events to persist. Assumes that we are only persisting events for one room at a time. """ - new_latest_event_ids = set(latest_event_ids) - # First, add all the new events to the list - new_latest_event_ids.update( - event.event_id for event, ctx in event_contexts + + # we're only interested in new events which aren't outliers and which aren't + # being rejected. 
+ new_events = [ + event for event, ctx in event_contexts if not event.internal_metadata.is_outlier() and not ctx.rejected + ] + + # start with the existing forward extremities + result = set(latest_event_ids) + + # add all the new events to the list + result.update( + event.event_id for event in new_events ) - # Now remove all events that are referenced by the to-be-added events - new_latest_event_ids.difference_update( + + # Now remove all events which are prev_events of any of the new events + result.difference_update( e_id - for event, ctx in event_contexts + for event in new_events for e_id, _ in event.prev_events - if not event.internal_metadata.is_outlier() and not ctx.rejected ) - # And finally remove any events that are referenced by previously added - # events. - rows = yield self._simple_select_many_batch( - table="event_edges", - column="prev_event_id", - iterable=list(new_latest_event_ids), - retcols=["prev_event_id"], - keyvalues={ - "is_state": False, - }, - desc="_calculate_new_extremeties", - ) + # Finally, remove any events which are prev_events of any existing events. + existing_prevs = yield self._get_events_which_are_prevs(result) + result.difference_update(existing_prevs) - new_latest_event_ids.difference_update( - row["prev_event_id"] for row in rows - ) + if not result: + logger.warn( + "Forward extremity list A+B-C-D is now empty in %s. " + "Old extremities (A): %s, new events (B): %s, " + "existing events which are reffed by new events (C): %s, " + "new events which are reffed by existing events (D): %s", + room_id, latest_event_ids, new_events, + [e_id for event in new_events for e_id, _ in event.prev_events], + existing_prevs, + ) + defer.returnValue(result) - defer.returnValue(new_latest_event_ids) + @defer.inlineCallbacks + def _get_events_which_are_prevs(self, event_ids): + """Filter the supplied list of event_ids to get those which are prev_events of + existing (non-outlier) events. + + Args: + event_ids (Iterable[str]): event ids to filter + + Returns: + Deferred[List[str]]: filtered event ids + """ + results = [] + + def _get_events(txn, batch): + sql = """ + SELECT prev_event_id + FROM event_edges + INNER JOIN events USING (event_id) + LEFT JOIN rejections USING (event_id) + WHERE + prev_event_id IN (%s) + AND rejections.event_id IS NULL + """ % ( + ",".join("?" for _ in batch), + ) + + txn.execute(sql, batch) + results.extend(r[0] for r in txn) + + for chunk in batch_iter(event_ids, 100): + yield self.runInteraction( + "_get_events_which_are_prevs", + _get_events, + chunk, + ) + + defer.returnValue(results) @defer.inlineCallbacks def _get_new_state_after_events(self, room_id, events_context, old_latest_event_ids, @@ -586,10 +635,6 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore the new current state is only returned if we've already calculated it. 
""" - - if not new_latest_event_ids: - return - # map from state_group to ((type, key) -> event_id) state map state_groups_map = {} -- cgit 1.4.1 From 3e39783d5d7ca5da7b3619d0328f6aeec48854de Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 2 Oct 2018 23:44:14 +0100 Subject: remove debugging --- synapse/storage/events.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 046174edb3..8822dc7bcb 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -555,16 +555,6 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore existing_prevs = yield self._get_events_which_are_prevs(result) result.difference_update(existing_prevs) - if not result: - logger.warn( - "Forward extremity list A+B-C-D is now empty in %s. " - "Old extremities (A): %s, new events (B): %s, " - "existing events which are reffed by new events (C): %s, " - "new events which are reffed by existing events (D): %s", - room_id, latest_event_ids, new_events, - [e_id for event in new_events for e_id, _ in event.prev_events], - existing_prevs, - ) defer.returnValue(result) @defer.inlineCallbacks -- cgit 1.4.1 From 2a4ea3baa8e6723f1bdbae2dad0f99c2a73646a5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 3 Oct 2018 07:09:25 +0100 Subject: fix newsfile name --- changelog.d/3995.bug | 1 - changelog.d/3995.bugfix | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 changelog.d/3995.bug create mode 100644 changelog.d/3995.bugfix diff --git a/changelog.d/3995.bug b/changelog.d/3995.bug deleted file mode 100644 index 2adc36756b..0000000000 --- a/changelog.d/3995.bug +++ /dev/null @@ -1 +0,0 @@ -Fix bug in event persistence logic which caused 'NoneType is not iterable' \ No newline at end of file diff --git a/changelog.d/3995.bugfix b/changelog.d/3995.bugfix new file mode 100644 index 0000000000..2adc36756b --- /dev/null +++ b/changelog.d/3995.bugfix @@ -0,0 +1 @@ +Fix bug in event persistence logic which caused 'NoneType is not iterable' \ No newline at end of file -- cgit 1.4.1 From 9693625e556df1af66ba376d49411064c2d0f47e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 3 Oct 2018 10:19:41 +0100 Subject: actually exclude outliers --- synapse/storage/events.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 8822dc7bcb..03cedf3a75 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -560,7 +560,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore @defer.inlineCallbacks def _get_events_which_are_prevs(self, event_ids): """Filter the supplied list of event_ids to get those which are prev_events of - existing (non-outlier) events. + existing (non-outlier/rejected) events. Args: event_ids (Iterable[str]): event ids to filter @@ -578,6 +578,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore LEFT JOIN rejections USING (event_id) WHERE prev_event_id IN (%s) + AND NOT events.outlier AND rejections.event_id IS NULL """ % ( ",".join("?" for _ in batch), -- cgit 1.4.1 From 7c570bff749eaf72be6f982c89facab3b16c42ae Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Oct 2018 11:28:01 +0100 Subject: Fix exception in background metrics collection We attempted to iterate through a list on a separate thread without doing the necessary copying. 
--- synapse/metrics/background_process_metrics.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 173908299c..037f1c490e 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -101,9 +101,13 @@ class _Collector(object): labels=["name"], ) - # We copy the dict so that it doesn't change from underneath us + # We copy the dict so that it doesn't change from underneath us. + # We also copy the process lists as that can also change with _bg_metrics_lock: - _background_processes_copy = dict(_background_processes) + _background_processes_copy = { + k: list(v) + for k, v in six.iteritems(_background_processes) + } for desc, processes in six.iteritems(_background_processes_copy): background_process_in_flight_count.add_metric( -- cgit 1.4.1 From c69faf8c4a045e4c53bb50b4dce4369fb586f0e0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Oct 2018 11:29:44 +0100 Subject: Newsfile --- changelog.d/3996.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3996.bugfix diff --git a/changelog.d/3996.bugfix b/changelog.d/3996.bugfix new file mode 100644 index 0000000000..a056485ea1 --- /dev/null +++ b/changelog.d/3996.bugfix @@ -0,0 +1 @@ +Fix exception in background metrics collection -- cgit 1.4.1 From 495a9d06bb21cf30376292d6592aa5fd59a52634 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Oct 2018 11:34:30 +0100 Subject: Fix exception handling in fetching remote profiles --- synapse/handlers/profile.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index f284d5a385..1dfbde84fd 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -142,10 +142,8 @@ class BaseProfileHandler(BaseHandler): if e.code != 404: logger.exception("Failed to get displayname") raise - except Exception: - logger.exception("Failed to get displayname") - else: - defer.returnValue(result["displayname"]) + + defer.returnValue(result["displayname"]) @defer.inlineCallbacks def set_displayname(self, target_user, requester, new_displayname, by_admin=False): @@ -199,8 +197,6 @@ class BaseProfileHandler(BaseHandler): if e.code != 404: logger.exception("Failed to get avatar_url") raise - except Exception: - logger.exception("Failed to get avatar_url") defer.returnValue(result["avatar_url"]) -- cgit 1.4.1 From 6e0c66f6515bbbf4e21f6f8837d5ee0d48bbd71a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Oct 2018 11:40:02 +0100 Subject: Newsfile --- changelog.d/3997.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3997.bugfix diff --git a/changelog.d/3997.bugfix b/changelog.d/3997.bugfix new file mode 100644 index 0000000000..b060ee8c18 --- /dev/null +++ b/changelog.d/3997.bugfix @@ -0,0 +1 @@ +Fix exception handling in fetching remote profiles -- cgit 1.4.1 From d1b7c0ca05bdcc3629cc3af97756ab71a94da294 Mon Sep 17 00:00:00 2001 From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com> Date: Wed, 3 Oct 2018 11:52:05 +0100 Subject: Docker build all tags starting vX.Y.Z, inc RCs Note the regex must match the complete string anyway, so the leading ^ was useless. 
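The one-line change below relies on CircleCI applying tag filters to the complete string, as the commit message notes. A throwaway local check of the old and new patterns under full-match semantics (not part of the repo):

```python
import re

old = re.compile(r"^v[0-9].[0-9]+.[0-9]+(.[0-9]+)?")
new = re.compile(r"v[0-9].[0-9]+.[0-9]+.*")

# plain releases match under either pattern
assert old.fullmatch("v0.33.6") and new.fullmatch("v0.33.6")

# release candidates only match once the tail is relaxed to `.*`
assert new.fullmatch("v0.33.6rc1")
assert old.fullmatch("v0.33.6rc1") is None
```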
--- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 6ae3a42235..ec3848b048 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -156,7 +156,7 @@ workflows: - dockerhubuploadrelease: filters: tags: - only: /^v[0-9].[0-9]+.[0-9]+(.[0-9]+)?/ + only: /v[0-9].[0-9]+.[0-9]+.*/ branches: ignore: /.*/ - dockerhubuploadlatest: -- cgit 1.4.1 From 69e857853fe91d22fa792c9e26edd87840526c22 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Oct 2018 11:57:30 +0100 Subject: Fix handling of rejected threepid invites --- synapse/event_auth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 02fa46ef7f..c6c011b6b6 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -157,7 +157,7 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True): raise AuthError( 403, ( "You cannot issue a third party invite for %s." % - (event.content.display_name,) + (event.content.get("display_name", ""),) ) ) else: -- cgit 1.4.1 From 93a86039040665ec6fbe7bf0c6d7c5024fa95dd5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Oct 2018 11:59:05 +0100 Subject: Newsfile --- changelog.d/3999.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3999.bugfix diff --git a/changelog.d/3999.bugfix b/changelog.d/3999.bugfix new file mode 100644 index 0000000000..dc3b2caffa --- /dev/null +++ b/changelog.d/3999.bugfix @@ -0,0 +1 @@ +Fix handling of rejected threepid invites -- cgit 1.4.1 From 52e6e815bef4379773fde257c85c4fc4d11cd181 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Oct 2018 14:13:07 +0100 Subject: Sanitise error messages when user doesn't have permission to invite --- synapse/event_auth.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/synapse/event_auth.py b/synapse/event_auth.py index c6c011b6b6..af3eee95b9 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -155,10 +155,7 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True): if user_level < invite_level: raise AuthError( - 403, ( - "You cannot issue a third party invite for %s." % - (event.content.get("display_name", ""),) - ) + 403, "You don't have permission to invite users", ) else: logger.debug("Allowing! %s", event) @@ -305,7 +302,7 @@ def _is_membership_change_allowed(event, auth_events): if user_level < invite_level: raise AuthError( - 403, "You cannot invite user %s." 
% target_user_id + 403, "You don't have permission to invite users", ) elif Membership.JOIN == membership: # Joins are valid iff caller == target and they were: -- cgit 1.4.1 From a59d8996682b33454e1afcf2ab1bc23c5224e325 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 3 Oct 2018 17:15:35 +0100 Subject: Pin to prometheus_client<0.4 to avoid renaming all of our metrics --- changelog.d/4002.misc | 1 + synapse/python_dependencies.py | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelog.d/4002.misc diff --git a/changelog.d/4002.misc b/changelog.d/4002.misc new file mode 100644 index 0000000000..545abe5d58 --- /dev/null +++ b/changelog.d/4002.misc @@ -0,0 +1 @@ +Pin to prometheus_client<0.4 to avoid renaming all of our metrics \ No newline at end of file diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 0f339a0320..d4d983b00a 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -58,7 +58,10 @@ REQUIREMENTS = { "msgpack-python>=0.3.0": ["msgpack"], "phonenumbers>=8.2.0": ["phonenumbers"], "six>=1.10": ["six"], - "prometheus_client>=0.0.18": ["prometheus_client"], + + # prometheus_client 0.4.0 changed the format of counter metrics + # (cf https://github.com/matrix-org/synapse/issues/4001) + "prometheus_client>=0.0.18,<0.4.0": ["prometheus_client"], # we use attr.s(slots), which arrived in 16.0.0 "attrs>=16.0.0": ["attr>=16.0.0"], -- cgit 1.4.1 From fd99787162113857119c033355548c5b3769a309 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Thu, 27 Sep 2018 14:53:58 -0600 Subject: Incorporate Dave's work for GDPR login flows As per https://github.com/vector-im/riot-web/issues/7168#issuecomment-419996117 --- synapse/api/constants.py | 1 + synapse/handlers/auth.py | 4 ++++ synapse/rest/client/v2_alpha/auth.py | 20 ++++++++++++++++++++ synapse/rest/client/v2_alpha/register.py | 15 +++++++++++++++ 4 files changed, 40 insertions(+) diff --git a/synapse/api/constants.py b/synapse/api/constants.py index c2630c4c64..b2815da0ab 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -51,6 +51,7 @@ class LoginType(object): EMAIL_IDENTITY = u"m.login.email.identity" MSISDN = u"m.login.msisdn" RECAPTCHA = u"m.login.recaptcha" + TERMS = u"m.login.terms" DUMMY = u"m.login.dummy" # Only for C/S API v1 diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 2a5eab124f..f08a2cdd7e 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -59,6 +59,7 @@ class AuthHandler(BaseHandler): LoginType.EMAIL_IDENTITY: self._check_email_identity, LoginType.MSISDN: self._check_msisdn, LoginType.DUMMY: self._check_dummy_auth, + LoginType.TERMS: self._check_terms_auth, } self.bcrypt_rounds = hs.config.bcrypt_rounds @@ -431,6 +432,9 @@ class AuthHandler(BaseHandler): def _check_dummy_auth(self, authdict, _): return defer.succeed(True) + def _check_terms_auth(self, authdict, _): + return defer.succeed(True) + @defer.inlineCallbacks def _check_threepid(self, medium, authdict): if 'threepid_creds' not in authdict: diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index bd8b5f4afa..bc3bfee4a0 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -130,6 +130,26 @@ class AuthRestServlet(RestServlet): request.setHeader(b"Content-Type", b"text/html; charset=utf-8") request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),)) + request.write(html_bytes) + finish_request(request) + 
defer.returnValue(None) + elif stagetype == LoginType.TERMS: + session = request.args['session'][0] + authdict = { + 'session': session, + } + success = yield self.auth_handler.add_oob_auth( + LoginType.TERMS, + authdict, + self.hs.get_ip_from_request(request) + ) + + html = "hai" + html_bytes = html.encode("utf8") + request.setResponseCode(200) + request.setHeader(b"Content-Type", b"text/html; charset=utf-8") + request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),)) + request.write(html_bytes) finish_request(request) defer.returnValue(None) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 192f52e462..dedf5269ed 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -359,6 +359,21 @@ class RegisterRestServlet(RestServlet): [LoginType.MSISDN, LoginType.EMAIL_IDENTITY] ]) + if self.hs.config.block_events_without_consent_error is not None: + new_flows = [] + for flow in flows: + # To only allow registration if completing GDPR auth, + # making clients that don't support it use fallback auth. + #flow.append(LoginType.TERMS) + + # or to duplicate all the flows above with the GDPR flow on the + # end so clients that support it can use it but clients that don't + # continue to consent via the DM from server notices bot. + new_flows.extend([ + flow + [LoginType.TERMS] + ]) + flows.extend(new_flows) + auth_result, params, session_id = yield self.auth_handler.check_auth( flows, body, self.hs.get_ip_from_request(request) ) -- cgit 1.4.1 From 149c4f176563bd8c976d9c4601825753f7292b12 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 3 Oct 2018 15:25:53 -0600 Subject: Supply params for terms auth stage As per https://github.com/matrix-org/matrix-doc/pull/1692 --- synapse/handlers/auth.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index f08a2cdd7e..d6a19b74e9 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -466,6 +466,15 @@ class AuthHandler(BaseHandler): def _get_params_recaptcha(self): return {"public_key": self.hs.config.recaptcha_public_key} + def _get_params_terms(self): + return { + "policies": [{ + "name": "Privacy Policy", + "version": self.hs.config.user_consent_version, + "url": "%s/_matrix/consent/public" % (self.hs.config.public_baseurl,), + }], + } + def _auth_dict_for_flows(self, flows, session): public_flows = [] for f in flows: -- cgit 1.4.1 From 3099d96dba1c5a24cdd81575f6b8b8e07a9e8c94 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 3 Oct 2018 15:54:19 -0600 Subject: Flesh out the fallback auth for terms --- synapse/rest/client/v2_alpha/auth.py | 74 ++++++++++++++++++++++++++++++++---- 1 file changed, 67 insertions(+), 7 deletions(-) diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index bc3bfee4a0..f86f09adcf 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -68,6 +68,29 @@ function captchaDone() { """ +TERMS_TEMPLATE = """ + + +Authentication + + + + +
+<form id="registrationForm" method="post" action="%(myurl)s">
+    <div>
+        <p>
+            Please click the button below if you agree to the
+            <a href="%(terms_url)s">privacy policy of this homeserver.</a>
+        </p>
+        <input hidden type="text" name="session" value="%(session)s" />
+        <input type="submit" value="Agree" />
+    </div>
+</form>
+ + +""" + SUCCESS_TEMPLATE = """ @@ -138,13 +161,16 @@ class AuthRestServlet(RestServlet): authdict = { 'session': session, } - success = yield self.auth_handler.add_oob_auth( - LoginType.TERMS, - authdict, - self.hs.get_ip_from_request(request) - ) - html = "hai" + html = TERMS_TEMPLATE % { + 'session': session, + 'terms_url': "%s/_matrix/consent/public" % ( + self.hs.config.public_baseurl, + ), + 'myurl': "%s/auth/%s/fallback/web" % ( + CLIENT_V2_ALPHA_PREFIX, LoginType.TERMS + ), + } html_bytes = html.encode("utf8") request.setResponseCode(200) request.setHeader(b"Content-Type", b"text/html; charset=utf-8") @@ -159,7 +185,7 @@ class AuthRestServlet(RestServlet): @defer.inlineCallbacks def on_POST(self, request, stagetype): yield - if stagetype == "m.login.recaptcha": + if stagetype == LoginType.RECAPTCHA: if ('g-recaptcha-response' not in request.args or len(request.args['g-recaptcha-response'])) == 0: raise SynapseError(400, "No captcha response supplied") @@ -198,6 +224,40 @@ class AuthRestServlet(RestServlet): request.write(html_bytes) finish_request(request) + defer.returnValue(None) + elif stagetype == LoginType.TERMS: + if ('session' not in request.args or + len(request.args['session'])) == 0: + raise SynapseError(400, "No session supplied") + + session = request.args['session'][0] + authdict = {'session': session} + + success = yield self.auth_handler.add_oob_auth( + LoginType.TERMS, + authdict, + self.hs.get_ip_from_request(request) + ) + + if success: + html = SUCCESS_TEMPLATE + else: + html = TERMS_TEMPLATE % { + 'session': session, + 'terms_url': "%s/_matrix/consent/public" % ( + self.hs.config.public_baseurl, + ), + 'myurl': "%s/auth/%s/fallback/web" % ( + CLIENT_V2_ALPHA_PREFIX, LoginType.TERMS + ), + } + html_bytes = html.encode("utf8") + request.setResponseCode(200) + request.setHeader(b"Content-Type", b"text/html; charset=utf-8") + request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),)) + + request.write(html_bytes) + finish_request(request) defer.returnValue(None) else: raise SynapseError(404, "Unknown auth stage type") -- cgit 1.4.1 From dfcad5fad5fbfac0a9182853d1acfe410e7cd888 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 3 Oct 2018 15:54:32 -0600 Subject: Make the terms flow requried --- synapse/rest/client/v2_alpha/register.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index dedf5269ed..78e63447a7 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -364,14 +364,14 @@ class RegisterRestServlet(RestServlet): for flow in flows: # To only allow registration if completing GDPR auth, # making clients that don't support it use fallback auth. - #flow.append(LoginType.TERMS) + flow.append(LoginType.TERMS) # or to duplicate all the flows above with the GDPR flow on the # end so clients that support it can use it but clients that don't # continue to consent via the DM from server notices bot. 
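A toy illustration of the two strategies those comments describe, with `flows` as a plain list of stage-name lists rather than the servlet's real flow objects:

```python
flows = [["m.login.recaptcha"], ["m.login.email.identity"]]
TERMS = "m.login.terms"

# what this commit does: the terms stage is appended to every flow, so
# consent becomes mandatory and clients without native support fall back
# to web-based auth
mandatory = [flow + [TERMS] for flow in flows]

# the commented-out alternative: keep the original flows and add
# duplicates ending in the terms stage, so supporting clients may use it
# while the rest consent later via the server-notices DM
opt_in = flows + [flow + [TERMS] for flow in flows]
```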
- new_flows.extend([ - flow + [LoginType.TERMS] - ]) + #new_flows.extend([ + # flow + [LoginType.TERMS] + #]) flows.extend(new_flows) auth_result, params, session_id = yield self.auth_handler.check_auth( -- cgit 1.4.1 From f9d34a763c90811cd53965825799767569ba0e68 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 3 Oct 2018 15:54:54 -0600 Subject: Auto-consent to the privacy policy if the user registered with terms --- synapse/rest/client/v2_alpha/register.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 78e63447a7..851ce6e9a4 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -460,6 +460,12 @@ class RegisterRestServlet(RestServlet): params.get("bind_msisdn") ) + if auth_result and LoginType.TERMS in auth_result: + logger.info("User %s has consented to the privacy policy" % registered_user_id) + yield self.store.user_set_consent_version( + registered_user_id, self.hs.config.user_consent_version, + ) + defer.returnValue((200, return_dict)) def on_OPTIONS(self, _): -- cgit 1.4.1 From 537d0b7b3632789e40cec13f3120151098f11d75 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 3 Oct 2018 17:50:11 -0600 Subject: Use a flag rather than a new route for the public policy This also means that the template now has optional parameters, which will need to be documented somehow. --- synapse/handlers/auth.py | 2 +- synapse/rest/client/v2_alpha/auth.py | 4 ++-- synapse/rest/consent/consent_resource.py | 36 +++++++++++++++++++------------- 3 files changed, 25 insertions(+), 17 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index d6a19b74e9..42d1336d6e 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -471,7 +471,7 @@ class AuthHandler(BaseHandler): "policies": [{ "name": "Privacy Policy", "version": self.hs.config.user_consent_version, - "url": "%s/_matrix/consent/public" % (self.hs.config.public_baseurl,), + "url": "%s/_matrix/consent?public=true" % (self.hs.config.public_baseurl,), }], } diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index f86f09adcf..77a5ea66f3 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -164,7 +164,7 @@ class AuthRestServlet(RestServlet): html = TERMS_TEMPLATE % { 'session': session, - 'terms_url': "%s/_matrix/consent/public" % ( + 'terms_url': "%s/_matrix/consent?public=true" % ( self.hs.config.public_baseurl, ), 'myurl': "%s/auth/%s/fallback/web" % ( @@ -244,7 +244,7 @@ class AuthRestServlet(RestServlet): else: html = TERMS_TEMPLATE % { 'session': session, - 'terms_url': "%s/_matrix/consent/public" % ( + 'terms_url': "%s/_matrix/consent?public=true" % ( self.hs.config.public_baseurl, ), 'myurl': "%s/auth/%s/fallback/web" % ( diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py index 7362e1858d..7a5786f164 100644 --- a/synapse/rest/consent/consent_resource.py +++ b/synapse/rest/consent/consent_resource.py @@ -30,7 +30,7 @@ from twisted.web.server import NOT_DONE_YET from synapse.api.errors import NotFoundError, StoreError, SynapseError from synapse.config import ConfigError from synapse.http.server import finish_request, wrap_html_request_handler -from synapse.http.servlet import parse_string +from synapse.http.servlet import parse_string, parse_boolean from synapse.types import UserID # language to use for the templates. 
TODO: figure this out from Accept-Language @@ -137,27 +137,35 @@ class ConsentResource(Resource): request (twisted.web.http.Request): """ - version = parse_string(request, "v", - default=self._default_consent_version) - username = parse_string(request, "u", required=True) - userhmac = parse_string(request, "h", required=True, encoding=None) + public_version = parse_boolean(request, "public", default=False) - self._check_hash(username, userhmac) + version = self._default_consent_version + username = None + userhmac = None + has_consented = False + if not public_version: + version = parse_string(request, "v", + default=self._default_consent_version) + username = parse_string(request, "u", required=True) + userhmac = parse_string(request, "h", required=True, encoding=None) - if username.startswith('@'): - qualified_user_id = username - else: - qualified_user_id = UserID(username, self.hs.hostname).to_string() + self._check_hash(username, userhmac) - u = yield self.store.get_user_by_id(qualified_user_id) - if u is None: - raise NotFoundError("Unknown user") + if username.startswith('@'): + qualified_user_id = username + else: + qualified_user_id = UserID(username, self.hs.hostname).to_string() + + u = yield self.store.get_user_by_id(qualified_user_id) + if u is None: + raise NotFoundError("Unknown user") + has_consented = u["consent_version"] == version try: self._render_template( request, "%s.html" % (version,), user=username, userhmac=userhmac, version=version, - has_consented=(u["consent_version"] == version), + has_consented=has_consented, public_version=public_version, ) except TemplateNotFound: raise NotFoundError("Unknown policy version") -- cgit 1.4.1 From 158d6c75b6a9d62c710ea612e1961fbb5772fd34 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 3 Oct 2018 17:54:08 -0600 Subject: Changelog --- changelog.d/4004.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4004.feature diff --git a/changelog.d/4004.feature b/changelog.d/4004.feature new file mode 100644 index 0000000000..ef5cdaf5ec --- /dev/null +++ b/changelog.d/4004.feature @@ -0,0 +1 @@ +Add `m.login.terms` to the registration flow when consent tracking is enabled. 
**This makes the template arguments conditionally optional on a new `public_version` variable - update your privacy templates to support this.** -- cgit 1.4.1 From dd59dfc51ffdcb49e3ae46b9072cff86dfc32d94 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 4 Oct 2018 22:37:55 +1000 Subject: full version --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index 312e4e2fa6..43c5821ade 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.33.6rc1" +__version__ = "0.33.6" -- cgit 1.4.1 From 92faeb2a3fc73bd9307b3192aeacec4afb1aad1a Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 4 Oct 2018 22:38:10 +1000 Subject: changelog --- CHANGES.md | 9 +++++++++ changelog.d/4002.misc | 1 - 2 files changed, 9 insertions(+), 1 deletion(-) delete mode 100644 changelog.d/4002.misc diff --git a/CHANGES.md b/CHANGES.md index 03d7412874..048b9f95db 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 0.33.6 (2018-10-04) +=========================== + +Internal Changes +---------------- + +- Pin to prometheus_client<0.4 to avoid renaming all of our metrics ([\#4002](https://github.com/matrix-org/synapse/issues/4002)) + + Synapse 0.33.6rc1 (2018-10-03) ============================== diff --git a/changelog.d/4002.misc b/changelog.d/4002.misc deleted file mode 100644 index 545abe5d58..0000000000 --- a/changelog.d/4002.misc +++ /dev/null @@ -1 +0,0 @@ -Pin to prometheus_client<0.4 to avoid renaming all of our metrics \ No newline at end of file -- cgit 1.4.1 From 17d585753f1df2b2c2b13ddb8171e174cef97aac Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 4 Oct 2018 15:18:52 +0100 Subject: Delete unreferened state groups during purge --- synapse/storage/events.py | 33 +++++++++++++++++++++++++------ synapse/storage/state.py | 50 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+), 6 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index e7487311ce..0fb190530a 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2025,6 +2025,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore logger.info("[purge] finding state groups which depend on redundant" " state groups") remaining_state_groups = [] + unreferenced_state_groups = 0 for i in range(0, len(state_rows), 100): chunk = [sg for sg, in state_rows[i:i + 100]] # look for state groups whose prev_state_group is one we are about @@ -2037,13 +2038,33 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore retcols=["state_group"], keyvalues={}, ) - remaining_state_groups.extend( - row["state_group"] for row in rows - # exclude state groups we are about to delete: no point in - # updating them - if row["state_group"] not in state_groups_to_delete - ) + for row in rows: + sg = row["state_group"] + + if sg in state_groups_to_delete: + # exclude state groups we are about to delete: no point in + # updating them + continue + + if not self._is_state_group_referenced(txn, sg): + # Let's also delete unreferenced state groups while we're + # here, since otherwise we'd need to de-delta them + state_groups_to_delete.add(sg) + unreferenced_state_groups += 1 + continue + + remaining_state_groups.append(sg) + + logger.info( + "[purge] found %i extra unreferenced state groups to delete", + unreferenced_state_groups, + ) + + logger.info( + "[purge] de-delta-ing %i remaining state 
groups", + len(remaining_state_groups), + ) # Now we turn the state groups that reference to-be-deleted state # groups to non delta versions. diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 3f4cbd61c4..b88c7dc091 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1041,6 +1041,56 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): return count + def _is_state_group_referenced(self, txn, state_group): + """Checks if a given state group is referenced, or is safe to delete. + + A state groups is referenced if it or any of its descendants are + pointed at by an event. (A descendant is a group which has the given + state_group as a prev group) + """ + + # We check this by doing a depth first search to look for any + # descendant referenced by `event_to_state_groups`. + + # State groups we need to check, contains state groups that are + # descendants of `state_group` + state_groups_to_search = [state_group] + + # Set of state groups we've already checked + state_groups_searched = set() + + while state_groups_to_search: + state_group = state_groups_to_search.pop() # Next state group to check + + is_referenced = self._simple_select_one_onecol_txn( + txn, + table="event_to_state_groups", + keyvalues={"state_group": state_group}, + retcol="event_id", + allow_none=True, + ) + if is_referenced: + # A descendant is referenced by event_to_state_groups, so + # original state group is referenced. + return True + + state_groups_searched.add(state_group) + + # Find all children of current state group and add to search + references = self._simple_select_onecol_txn( + txn, + table="state_group_edges", + keyvalues={"prev_state_group": state_group}, + retcol="state_group", + ) + state_groups_to_search.extend(references) + + # Lets be paranoid and check for cycles + if state_groups_searched.intersection(references): + raise Exception("State group %s has cyclic dependency", state_group) + + return False + class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): """ Keeps track of the state at a given event. -- cgit 1.4.1 From 4917ff55234718c0e650c6dc2a1117304465b9be Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 4 Oct 2018 15:24:01 +0100 Subject: Add state_group index to event_to_state_groups This is needed to efficiently check for unreferenced state groups during purge. --- synapse/storage/prepare_database.py | 2 +- .../delta/52/add_event_to_state_group_index.sql | 19 +++++++++++++++++++ synapse/storage/state.py | 7 +++++++ 3 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 synapse/storage/schema/delta/52/add_event_to_state_group_index.sql diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index b364719312..bd740e1e45 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -25,7 +25,7 @@ logger = logging.getLogger(__name__) # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. 
-SCHEMA_VERSION = 51 +SCHEMA_VERSION = 52 dir_path = os.path.abspath(os.path.dirname(__file__)) diff --git a/synapse/storage/schema/delta/52/add_event_to_state_group_index.sql b/synapse/storage/schema/delta/52/add_event_to_state_group_index.sql new file mode 100644 index 0000000000..91e03d13e1 --- /dev/null +++ b/synapse/storage/schema/delta/52/add_event_to_state_group_index.sql @@ -0,0 +1,19 @@ +/* Copyright 2018 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- This is needed to efficiently check for unreferenced state groups during +-- purge. Added events_to_state_group(state_group) index +INSERT into background_updates (update_name, progress_json) + VALUES ('event_to_state_groups_sg_index', '{}'); diff --git a/synapse/storage/state.py b/synapse/storage/state.py index b88c7dc091..3f08f447a3 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1114,6 +1114,7 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication" STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index" CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx" + EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index" def __init__(self, db_conn, hs): super(StateStore, self).__init__(db_conn, hs) @@ -1132,6 +1133,12 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): columns=["state_key"], where_clause="type='m.room.member'", ) + self.register_background_index_update( + self.EVENT_STATE_GROUP_INDEX_UPDATE_NAME, + index_name="event_to_state_groups_sg_index", + table="event_to_state_groups", + columns=["state_group"], + ) def _store_event_state_mappings_txn(self, txn, events_and_contexts): state_groups = {} -- cgit 1.4.1 From d9f3db5081a4306520cb3c3208564d3c923fbf0a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 4 Oct 2018 15:29:30 +0100 Subject: Newsfile --- changelog.d/4006.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4006.misc diff --git a/changelog.d/4006.misc b/changelog.d/4006.misc new file mode 100644 index 0000000000..35ffa1c2d2 --- /dev/null +++ b/changelog.d/4006.misc @@ -0,0 +1 @@ +Delete unreferenced state groups during history purge -- cgit 1.4.1 From 2dadc092b8f0e4cc1ac037ea54324efd906d4caf Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 4 Oct 2018 17:00:27 +0100 Subject: move logic into register, fix room alias localpart bug, tests --- synapse/handlers/register.py | 45 ++++++++++++------------------ tests/handlers/test_register.py | 62 ++++++++++++++++++++++++++++------------- tests/utils.py | 1 + 3 files changed, 62 insertions(+), 46 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 7584064d5d..01cf7ab58e 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -220,8 +220,26 @@ class RegistrationHandler(BaseHandler): # auto-join the user to any rooms we're supposed to dump them into fake_requester = create_requester(user_id) + + 
# try to create the room if we're the first user on the server + if self.hs.config.autocreate_auto_join_rooms: + count = yield self.store.count_all_users() + auto_create_rooms = count == 1 + for r in self.hs.config.auto_join_rooms: try: + if auto_create_rooms and RoomAlias.is_valid(r): + room_creation_handler = self.hs.get_room_creation_handler() + # create room expects the localpart of the room alias + room_alias_localpart = RoomAlias.from_string(r).localpart + yield room_creation_handler.create_room( + fake_requester, + config={ + "preset": "public_chat", + "room_alias_name": room_alias_localpart + }, + ratelimit=False, + ) yield self._join_user_to_room(fake_requester, r) except Exception as e: logger.error("Failed to join new user to %r: %r", r, e) @@ -514,33 +532,6 @@ class RegistrationHandler(BaseHandler): @defer.inlineCallbacks def _join_user_to_room(self, requester, room_identifier): - # try to create the room if we're the first user on the server - if self.hs.config.autocreate_auto_join_rooms: - count = yield self.store.count_all_users() - if count == 1 and RoomAlias.is_valid(room_identifier): - room_creation_handler = self.hs.get_room_creation_handler() - info = yield room_creation_handler.create_room( - requester, - config={ - "preset": "public_chat", - }, - ratelimit=False, - ) - room_id = info["room_id"] - - directory_handler = self.hs.get_handlers().directory_handler - room_alias = RoomAlias.from_string(room_identifier) - yield directory_handler.create_association( - user_id=requester.user.to_string(), - room_alias=room_alias, - room_id=room_id, - servers=[self.hs.hostname], - ) - - yield directory_handler.send_room_alias_update_event( - requester, requester.user.to_string(), room_id - ) - room_id = None room_member_handler = self.hs.get_room_member_handler() if RoomID.is_valid(room_identifier): diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 7b4ade3dfb..a150a897ac 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -19,7 +19,7 @@ from twisted.internet import defer from synapse.api.errors import ResourceLimitError from synapse.handlers.register import RegistrationHandler -from synapse.types import UserID, create_requester +from synapse.types import RoomAlias, UserID, create_requester from tests.utils import setup_test_homeserver @@ -41,30 +41,28 @@ class RegistrationTestCase(unittest.TestCase): self.mock_captcha_client = Mock() self.hs = yield setup_test_homeserver( self.addCleanup, - handlers=None, - http_client=None, expire_access_token=True, - profile_handler=Mock(), ) self.macaroon_generator = Mock( generate_access_token=Mock(return_value='secret') ) self.hs.get_macaroon_generator = Mock(return_value=self.macaroon_generator) - self.hs.handlers = RegistrationHandlers(self.hs) + # self.hs.handlers = RegistrationHandlers(self.hs) self.handler = self.hs.get_handlers().registration_handler self.store = self.hs.get_datastore() self.hs.config.max_mau_value = 50 self.lots_of_users = 100 self.small_number_of_users = 1 + self.requester = create_requester("@requester:test") + @defer.inlineCallbacks def test_user_is_created_and_logged_in_if_doesnt_exist(self): - local_part = "someone" - display_name = "someone" - user_id = "@someone:test" - requester = create_requester("@as:test") + frank = UserID.from_string("@frank:test") + user_id = frank.to_string() + requester = create_requester(user_id) result_user_id, result_token = yield self.handler.get_or_create_user( - requester, local_part, display_name + requester, 
frank.localpart, "Frankie" ) self.assertEquals(result_user_id, user_id) self.assertEquals(result_token, 'secret') @@ -78,12 +76,11 @@ class RegistrationTestCase(unittest.TestCase): token="jkv;g498752-43gj['eamb!-5", password_hash=None, ) - local_part = "frank" - display_name = "Frank" - user_id = "@frank:test" - requester = create_requester("@as:test") + local_part = frank.localpart + user_id = frank.to_string() + requester = create_requester(user_id) result_user_id, result_token = yield self.handler.get_or_create_user( - requester, local_part, display_name + requester, local_part, None ) self.assertEquals(result_user_id, user_id) self.assertEquals(result_token, 'secret') @@ -92,7 +89,7 @@ class RegistrationTestCase(unittest.TestCase): def test_mau_limits_when_disabled(self): self.hs.config.limit_usage_by_mau = False # Ensure does not throw exception - yield self.handler.get_or_create_user("requester", 'a', "display_name") + yield self.handler.get_or_create_user(self.requester, 'a', "display_name") @defer.inlineCallbacks def test_get_or_create_user_mau_not_blocked(self): @@ -101,7 +98,7 @@ class RegistrationTestCase(unittest.TestCase): return_value=defer.succeed(self.hs.config.max_mau_value - 1) ) # Ensure does not throw exception - yield self.handler.get_or_create_user("@user:server", 'c', "User") + yield self.handler.get_or_create_user(self.requester, 'c', "User") @defer.inlineCallbacks def test_get_or_create_user_mau_blocked(self): @@ -110,13 +107,13 @@ class RegistrationTestCase(unittest.TestCase): return_value=defer.succeed(self.lots_of_users) ) with self.assertRaises(ResourceLimitError): - yield self.handler.get_or_create_user("requester", 'b', "display_name") + yield self.handler.get_or_create_user(self.requester, 'b', "display_name") self.store.get_monthly_active_count = Mock( return_value=defer.succeed(self.hs.config.max_mau_value) ) with self.assertRaises(ResourceLimitError): - yield self.handler.get_or_create_user("requester", 'b', "display_name") + yield self.handler.get_or_create_user(self.requester, 'b', "display_name") @defer.inlineCallbacks def test_register_mau_blocked(self): @@ -147,3 +144,30 @@ class RegistrationTestCase(unittest.TestCase): ) with self.assertRaises(ResourceLimitError): yield self.handler.register_saml2(localpart="local_part") + + @defer.inlineCallbacks + def test_auto_create_auto_join_rooms(self): + room_alias_str = "#room:test" + self.hs.config.autocreate_auto_join_rooms = True + self.hs.config.auto_join_rooms = [room_alias_str] + + res = yield self.handler.register(localpart='jeff') + rooms = yield self.store.get_rooms_for_user(res[0]) + + directory_handler = self.hs.get_handlers().directory_handler + room_alias = RoomAlias.from_string(room_alias_str) + room_id = yield directory_handler.get_association(room_alias) + + self.assertTrue(room_id['room_id'] in rooms) + self.assertEqual(len(rooms), 1) + + @defer.inlineCallbacks + def test_auto_create_auto_join_rooms_with_no_rooms(self): + self.hs.config.autocreate_auto_join_rooms = True + self.hs.config.auto_join_rooms = [] + frank = UserID.from_string("@frank:test") + res = yield self.handler.register(frank.localpart) + self.assertEqual(res[0], frank.to_string()) + rooms = yield self.store.get_rooms_for_user(res[0]) + + self.assertEqual(len(rooms), 0) diff --git a/tests/utils.py b/tests/utils.py index aaed1149c3..d34c224fb1 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -149,6 +149,7 @@ def setup_test_homeserver( config.block_events_without_consent_error = None config.media_storage_providers = [] 
config.auto_join_rooms = [] + config.autocreate_auto_join_rooms = True config.limit_usage_by_mau = False config.hs_disabled = False config.hs_disabled_message = "" -- cgit 1.4.1 From f7199e873448794ffc88a08a9d7c01f1be0dfca1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 5 Oct 2018 11:23:08 +0100 Subject: Log looping call exceptions If a looping call function errors, then it kills the loop entirely. Currently it throws away the exception logs, so we should make it actually log them. Fixes #3929 --- synapse/util/__init__.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 680ea928c7..c237d003bc 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import functools import logging from itertools import islice @@ -66,7 +67,7 @@ class Clock(object): f(function): The function to call repeatedly. msec(float): How long to wait between calls in milliseconds. """ - call = task.LoopingCall(f) + call = task.LoopingCall(_log_exception_wrapper(f)) call.clock = self._reactor call.start(msec / 1000.0, now=False) return call @@ -109,3 +110,19 @@ def batch_iter(iterable, size): sourceiter = iter(iterable) # call islice until it returns an empty tuple return iter(lambda: tuple(islice(sourceiter, size)), ()) + + +def _log_exception_wrapper(f): + """Used to wrap looping calls to log loudly if they get killed """ + + @functools.wraps(f) + def wrap(*args, **kwargs): + try: + logger.info("Running looping call") + return f(*args, **kwargs) + except: # noqa: E722, as we reraise the exception this is fine. + logger.exception("Looping called died") + raise + + return wrap -- cgit 1.4.1 From 8164f6daf362b7039d6dcc8b645f306cfd5c49a8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 5 Oct 2018 11:25:09 +0100 Subject: Newsfile --- changelog.d/4008.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4008.misc diff --git a/changelog.d/4008.misc b/changelog.d/4008.misc new file mode 100644 index 0000000000..5730210054 --- /dev/null +++ b/changelog.d/4008.misc @@ -0,0 +1 @@ +Log exceptions in looping calls -- cgit 1.4.1 From 497444f1fdd2c39906179b1dde8c67415e465398 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 5 Oct 2018 15:08:36 +0100 Subject: Don't reuse backup versions Since we don't actually delete the keys (we just mark the versions as deleted in the db rather than actually deleting them), we won't reuse versions. Fixes https://github.com/vector-im/riot-web/issues/7448 --- synapse/storage/e2e_room_keys.py | 9 +++++++-- synapse/storage/schema/delta/51/e2e_room_keys.sql | 3 ++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 969f4aef9c..4d439bb164 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -193,7 +193,8 @@ class EndToEndRoomKeyStore(SQLBaseStore): @staticmethod def _get_current_version(txn, user_id): txn.execute( - "SELECT MAX(version) FROM e2e_room_keys_versions WHERE user_id=?", + "SELECT MAX(version) FROM e2e_room_keys_versions " + "WHERE user_id=?
AND deleted=0", (user_id,) ) row = txn.fetchone() @@ -226,6 +227,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): keyvalues={ "user_id": user_id, "version": this_version, + "deleted": 0, }, retcols=( "version", @@ -300,13 +302,16 @@ class EndToEndRoomKeyStore(SQLBaseStore): else: this_version = version - return self._simple_delete_one_txn( + return self._simple_update_one_txn( txn, table="e2e_room_keys_versions", keyvalues={ "user_id": user_id, "version": this_version, }, + updatevalues={ + "deleted": 1, + } ) return self.runInteraction( diff --git a/synapse/storage/schema/delta/51/e2e_room_keys.sql b/synapse/storage/schema/delta/51/e2e_room_keys.sql index 4531fd56ee..c0e66a697d 100644 --- a/synapse/storage/schema/delta/51/e2e_room_keys.sql +++ b/synapse/storage/schema/delta/51/e2e_room_keys.sql @@ -32,7 +32,8 @@ CREATE TABLE e2e_room_keys_versions ( user_id TEXT NOT NULL, version TEXT NOT NULL, algorithm TEXT NOT NULL, - auth_data TEXT NOT NULL + auth_data TEXT NOT NULL, + deleted SMALLINT DEFAULT 0 NOT NULL ); CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions(user_id, version); -- cgit 1.4.1 From 8a1817f0d29308a233783d43cbf1ad27891120c1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 8 Oct 2018 13:26:54 +0100 Subject: Use errback pattern and catch async failures --- synapse/handlers/appservice.py | 7 ++++++- synapse/util/__init__.py | 43 ++++++++++++++++++++++++++++-------------- 2 files changed, 35 insertions(+), 15 deletions(-) diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index f0f89af7dc..16b897eb18 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -28,6 +28,7 @@ from synapse.metrics import ( event_processing_loop_room_count, ) from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.util import make_log_failure_errback from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.util.metrics import Measure @@ -112,7 +113,11 @@ class ApplicationServicesHandler(object): if not self.started_scheduler: def start_scheduler(): - return self.scheduler.start().addErrback(log_failure) + return self.scheduler.start().addErrback( + make_log_failure_errback( + "Application Services Failure", + ) + ) run_as_background_process("as_scheduler", start_scheduler) self.started_scheduler = True diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index c237d003bc..964078aed4 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import functools import logging from itertools import islice @@ -67,9 +66,12 @@ class Clock(object): f(function): The function to call repeatedly. msec(float): How long to wait between calls in milliseconds. 
""" - call = task.LoopingCall(_log_exception_wrapper(f)) + call = task.LoopingCall(f) call.clock = self._reactor - call.start(msec / 1000.0, now=False) + d = call.start(msec / 1000.0, now=False) + d.addErrback(make_log_failure_errback( + "Looping call died", consumeErrors=False, + )) return call def call_later(self, delay, callback, *args, **kwargs): @@ -112,17 +114,30 @@ def batch_iter(iterable, size): return iter(lambda: tuple(islice(sourceiter, size)), ()) -def _log_exception_wrapper(f): - """Used to wrap looping calls to log loudly if they get killed +def make_log_failure_errback(msg, consumeErrors=True): + """Creates a function suitable for passing to `Deferred.addErrback` that + logs any failures that occur. + + Args: + msg (str): Message to log + consumeErrors (bool): If true consumes the failure, otherwise passes + on down the callback chain + + Returns: + func(Failure) """ - @functools.wraps(f) - def wrap(*args, **kwargs): - try: - logger.info("Running looping call") - return f(*args, **kwargs) - except: # noqa: E722, as we reraise the exception this is fine. - logger.exception("Looping called died") - raise + def log_failure(failure): + logger.error( + msg, + exc_info=( + failure.type, + failure.value, + failure.getTracebackObject() + ) + ) + + if not consumeErrors: + return failure - return wrap + return log_failure -- cgit 1.4.1 From 495975e231bd429eb0114d9258423a5e202dcb2b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 8 Oct 2018 13:44:58 +0100 Subject: Optimisation for filter_events_for_server We're better off hashing just the event_id than the whole ((type, state_key), event_id) tuple - so use a dict instead of a set. Also, iteritems > items. --- changelog.d/4017.misc | 1 + synapse/visibility.py | 13 ++++++------- 2 files changed, 7 insertions(+), 7 deletions(-) create mode 100644 changelog.d/4017.misc diff --git a/changelog.d/4017.misc b/changelog.d/4017.misc new file mode 100644 index 0000000000..b1ceb06560 --- /dev/null +++ b/changelog.d/4017.misc @@ -0,0 +1 @@ +Optimisation for serving federation requests \ No newline at end of file diff --git a/synapse/visibility.py b/synapse/visibility.py index d4680863d3..c64ad2144c 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -324,14 +324,13 @@ def filter_events_for_server(store, server_name, events): # server's domain. # # event_to_state_ids contains lots of duplicates, so it turns out to be - # cheaper to build a complete set of unique - # ((type, state_key), event_id) tuples, and then filter out the ones we - # don't want. 
+ # cheaper to build a complete event_id => (type, state_key) dict, and then + # filter out the ones we don't want # - state_key_to_event_id_set = { - e + event_id_to_state_key = { + event_id: key for key_to_eid in itervalues(event_to_state_ids) - for e in key_to_eid.items() + for key, event_id in iteritems(key_to_eid) } def include(typ, state_key): @@ -346,7 +345,7 @@ def filter_events_for_server(store, server_name, events): event_map = yield store.get_events([ e_id - for key, e_id in state_key_to_event_id_set + for e_id, key in iteritems(event_id_to_state_key) if include(key[0], key[1]) ]) -- cgit 1.4.1 From 69823205722662a72e4203ec304ff595a0f6ecf7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 8 Oct 2018 14:06:19 +0100 Subject: Remove unnecessary extra function call layer --- synapse/handlers/appservice.py | 18 +++--------------- synapse/util/__init__.py | 29 +++++++++++++---------------- 2 files changed, 16 insertions(+), 31 deletions(-) diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 16b897eb18..17eedf4dbf 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -28,7 +28,7 @@ from synapse.metrics import ( event_processing_loop_room_count, ) from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.util import make_log_failure_errback +from synapse.util import log_failure from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.util.metrics import Measure @@ -37,17 +37,6 @@ logger = logging.getLogger(__name__) events_processed_counter = Counter("synapse_handlers_appservice_events_processed", "") -def log_failure(failure): - logger.error( - "Application Services Failure", - exc_info=( - failure.type, - failure.value, - failure.getTracebackObject() - ) - ) - - class ApplicationServicesHandler(object): def __init__(self, hs): @@ -114,10 +103,9 @@ class ApplicationServicesHandler(object): if not self.started_scheduler: def start_scheduler(): return self.scheduler.start().addErrback( - make_log_failure_errback( - "Application Services Failure", - ) + log_failure, "Application Services Failure", ) + run_as_background_process("as_scheduler", start_scheduler) self.started_scheduler = True diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 964078aed4..9a8fae0497 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -69,9 +69,9 @@ class Clock(object): call = task.LoopingCall(f) call.clock = self._reactor d = call.start(msec / 1000.0, now=False) - d.addErrback(make_log_failure_errback( - "Looping call died", consumeErrors=False, - )) + d.addErrback( + log_failure, "Looping call died", consumeErrors=False, + ) return call def call_later(self, delay, callback, *args, **kwargs): @@ -114,7 +114,7 @@ def batch_iter(iterable, size): return iter(lambda: tuple(islice(sourceiter, size)), ()) -def make_log_failure_errback(msg, consumeErrors=True): +def log_failure(failure, msg, consumeErrors=True): """Creates a function suitable for passing to `Deferred.addErrback` that logs any failures that occur. 
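For reference, the pattern the two commits above converge on relies on a Twisted behaviour: `Deferred.addErrback(callback, *args, **kwargs)` invokes the callback with the `Failure` as its first argument, followed by the extra arguments given to `addErrback`. A minimal standalone sketch of that mechanism (illustrative only, not part of any patch here):

    import logging

    from twisted.internet import defer

    logger = logging.getLogger(__name__)

    def log_failure(failure, msg, consumeErrors=True):
        # Twisted passes the Failure first; `msg` arrives because it was
        # supplied as an extra positional argument to addErrback below.
        logger.error(
            msg,
            exc_info=(failure.type, failure.value, failure.getTracebackObject()),
        )
        if not consumeErrors:
            return failure  # pass the failure on down the errback chain

    d = defer.Deferred()
    d.addErrback(log_failure, "Looping call died")
    d.errback(RuntimeError("boom"))  # calls log_failure(<Failure>, "Looping call died")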
@@ -127,17 +127,14 @@ def make_log_failure_errback(msg, consumeErrors=True): func(Failure) """ - def log_failure(failure): - logger.error( - msg, - exc_info=( - failure.type, - failure.value, - failure.getTracebackObject() - ) + logger.error( + msg, + exc_info=( + failure.type, + failure.value, + failure.getTracebackObject() ) + ) - if not consumeErrors: - return failure - - return log_failure + if not consumeErrors: + return failure -- cgit 1.4.1 From 0c905ee015fdbf0d61f46db600fd79a18d0b6376 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 9 Oct 2018 09:39:13 +0100 Subject: be python3 compatible --- synapse/handlers/e2e_room_keys.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 2c330382cf..117144bb6e 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -15,6 +15,7 @@ import logging +from six import iteritems from twisted.internet import defer from synapse.api.errors import RoomKeysVersionError, StoreError, SynapseError @@ -137,8 +138,8 @@ class E2eRoomKeysHandler(object): # go through the room_keys. # XXX: this should/could be done concurrently, given we're in a lock. - for room_id, room in room_keys['rooms'].iteritems(): - for session_id, session in room['sessions'].iteritems(): + for room_id, room in iteritems(room_keys['rooms']): + for session_id, session in iteritems(room['sessions']): yield self._upload_room_key( user_id, version, room_id, session_id, session ) -- cgit 1.4.1 From f4a4dbcad106b41e3307eb5ca541ff3087b788cf Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 9 Oct 2018 09:47:04 +0100 Subject: Apparently this blank line is Very Important --- synapse/handlers/e2e_room_keys.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 117144bb6e..bf2a83cc31 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -16,6 +16,7 @@ import logging from six import iteritems + from twisted.internet import defer from synapse.api.errors import RoomKeysVersionError, StoreError, SynapseError -- cgit 1.4.1 From d3464ce708cd857216d88d3e67867f1cf0c8bf60 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 9 Oct 2018 10:33:59 +0100 Subject: isort --- synapse/rest/client/v2_alpha/room_keys.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index ea114bc8b4..539893a5d6 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -17,7 +17,7 @@ import logging from twisted.internet import defer -from synapse.api.errors import SynapseError, Codes +from synapse.api.errors import Codes, SynapseError from synapse.http.servlet import ( RestServlet, parse_json_object_from_request, -- cgit 1.4.1 From d34657e1f27df042a33cee4ba8c0b7cbaa112186 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 9 Oct 2018 11:15:54 +0100 Subject: Add changelog --- changelog.d/4019.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4019.feature diff --git a/changelog.d/4019.feature b/changelog.d/4019.feature new file mode 100644 index 0000000000..49e066d269 --- /dev/null +++ b/changelog.d/4019.feature @@ -0,0 +1 @@ +Add support for end-to-end key backup (MSC1687) -- cgit 1.4.1 From bdc27d6716d14128e25737865cc36c8adf42aeaa Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Oct 2018 14:15:49 +0100 
Subject: Add metric to count lazy member sync requests --- synapse/handlers/sync.py | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 67b8ca28c7..58edf21472 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -20,6 +20,8 @@ import logging from six import iteritems, itervalues +from prometheus_client import Counter + from twisted.internet import defer from synapse.api.constants import EventTypes, Membership @@ -36,6 +38,13 @@ from synapse.visibility import filter_events_for_client logger = logging.getLogger(__name__) + +# Counts the number of times we got asked for a lazy loaded sync. Type is one of +# initial_sync, full_sate_sync or incremental_sync +lazy_member_sync_counter = Counter( + "synapse_handlers_sync_lazy_member_sync", "", ["type"], +) + # Store the cache that tracks which lazy-loaded members have been sent to a given # client for no more than 30 minutes. LAZY_LOADED_MEMBERS_CACHE_MAX_AGE = 30 * 60 * 1000 @@ -227,14 +236,19 @@ class SyncHandler(object): @defer.inlineCallbacks def _wait_for_sync_for_user(self, sync_config, since_token, timeout, full_state): + if since_token is None: + sync_type = "initial_sync" + elif full_state: + sync_type = "full_state_sync" + else: + sync_type = "incremental_sync" + context = LoggingContext.current_context() if context: - if since_token is None: - context.tag = "initial_sync" - elif full_state: - context.tag = "full_state_sync" - else: - context.tag = "incremental_sync" + context.tag = sync_type + + if sync_config.filter_collection.lazy_load_members(): + lazy_member_sync_counter.labels(sync_type).inc() if timeout == 0 or since_token is None or full_state: # we are going to return immediately, so don't bother calling -- cgit 1.4.1 From 20733857abb01aee20ec9fd30dbd4b006aa5de23 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Oct 2018 14:16:48 +0100 Subject: Newsfile --- changelog.d/4022.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4022.misc diff --git a/changelog.d/4022.misc b/changelog.d/4022.misc new file mode 100644 index 0000000000..f1d49932e1 --- /dev/null +++ b/changelog.d/4022.misc @@ -0,0 +1 @@ +Add metric to count lazy member sync requests -- cgit 1.4.1 From b8d9e108be60b3d14a57238562bfb5f49781c2a6 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 9 Oct 2018 18:04:21 +0100 Subject: Fix mergefail --- synapse/api/errors.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 4124469442..0a6e78711f 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -344,7 +344,7 @@ class IncompatibleRoomVersionError(SynapseError): return cs_error( self.msg, self.errcode, - room_version=self.current_version, + room_version=self._room_version, ) -- cgit 1.4.1 From 395276b4051123d369aab5deab020f816479c63b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Oct 2018 09:24:39 +0100 Subject: Append _total to metric and fix up spelling --- synapse/handlers/sync.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 58edf21472..41daa1d888 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -40,9 +40,9 @@ logger = logging.getLogger(__name__) # Counts the number of times we got asked for a lazy loaded sync. 
Type is one of -# initial_sync, full_sate_sync or incremental_sync +# initial_sync, full_state_sync or incremental_sync lazy_member_sync_counter = Counter( - "synapse_handlers_sync_lazy_member_sync", "", ["type"], + "synapse_handlers_sync_lazy_member_sync_total", "", ["type"], ) # Store the cache that tracks which lazy-loaded members have been sent to a given -- cgit 1.4.1 From 3cbe8331e60559008875679fab51423eedb242c1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Oct 2018 11:23:17 +0100 Subject: Track number of non-empty sync responses instead --- synapse/handlers/sync.py | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 41daa1d888..5cae48436f 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -39,10 +39,12 @@ from synapse.visibility import filter_events_for_client logger = logging.getLogger(__name__) -# Counts the number of times we got asked for a lazy loaded sync. Type is one of -# initial_sync, full_state_sync or incremental_sync -lazy_member_sync_counter = Counter( - "synapse_handlers_sync_lazy_member_sync_total", "", ["type"], +# Counts the number of times we returned a non-empty sync. `type` is one of +# "initial_sync", "full_state_sync" or "incremental_sync", `lazy_loaded` is +# "true" or "false" depending on if the request asked for lazy loaded members or +# not. +non_empty_sync_counter = Counter( + "synapse_handlers_sync_nonempty_total", "", ["type", "lazy_loaded"], ) # Store the cache that tracks which lazy-loaded members have been sent to a given @@ -247,16 +249,12 @@ class SyncHandler(object): if context: context.tag = sync_type - if sync_config.filter_collection.lazy_load_members(): - lazy_member_sync_counter.labels(sync_type).inc() - if timeout == 0 or since_token is None or full_state: # we are going to return immediately, so don't bother calling # notifier.wait_for_events. 
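As background for the metric change above: a `prometheus_client` Counter declared with label names keeps an independent count per combination of label values, which is what lets a single metric distinguish sync type and lazy loading. A rough standalone sketch of that behaviour (illustrative only):

    from prometheus_client import Counter

    non_empty_sync_counter = Counter(
        "synapse_handlers_sync_nonempty_total", "", ["type", "lazy_loaded"],
    )

    # Each distinct (type, lazy_loaded) pair becomes its own time series.
    non_empty_sync_counter.labels("initial_sync", "true").inc()
    non_empty_sync_counter.labels("incremental_sync", "false").inc()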
result = yield self.current_sync_for_user( sync_config, since_token, full_state=full_state, ) - defer.returnValue(result) else: def current_sync_callback(before_token, after_token): return self.current_sync_for_user(sync_config, since_token) result = yield self.notifier.wait_for_events( sync_config.user.to_string(), timeout, current_sync_callback, from_token=since_token, ) - defer.returnValue(result) + + if result: + if sync_config.filter_collection.lazy_load_members(): + lazy_loaded = "true" + else: + lazy_loaded = "false" + non_empty_sync_counter.labels(sync_type, lazy_loaded).inc() + + defer.returnValue(result) def current_sync_for_user(self, sync_config, since_token=None, full_state=False): -- cgit 1.4.1 From 49840f5ab23f7c3bdaee72e2bdad2d458f4446d9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Oct 2018 11:31:02 +0100 Subject: Update newsfile --- changelog.d/4022.misc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/4022.misc b/changelog.d/4022.misc index f1d49932e1..5b0e136795 100644 --- a/changelog.d/4022.misc +++ b/changelog.d/4022.misc @@ -1 +1 @@ -Add metric to count lazy member sync requests +Add metric to count number of non-empty sync responses -- cgit 1.4.1 From 7e561b5c1a0cf0177f14e67851bb7e0ffaeda042 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Oct 2018 11:40:43 +0100 Subject: Add description to counter metric --- synapse/handlers/sync.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 5cae48436f..351892a94f 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -44,7 +44,11 @@ logger = logging.getLogger(__name__) # "true" or "false" depending on if the request asked for lazy loaded members or # not. non_empty_sync_counter = Counter( - "synapse_handlers_sync_nonempty_total", "", ["type", "lazy_loaded"], + "synapse_handlers_sync_nonempty_total", + "Count of non empty sync responses. type is initial_sync/full_state_sync" + "/incremental_sync. lazy_loaded indicates if lazy loaded members were " + "enabled for that request.", + ["type", "lazy_loaded"], ) # Store the cache that tracks which lazy-loaded members have been sent to a given -- cgit 1.4.1 From 8ddd0f273c501b83b75a58e379d73aef3cfab35e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 12 Oct 2018 09:55:41 +0100 Subject: Comments on get_all_new_events_stream just some docstrings to clarify the behaviour here --- synapse/storage/stream.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 4c296d72c0..d6cfdba519 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -630,7 +630,21 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): @defer.inlineCallbacks def get_all_new_events_stream(self, from_id, current_id, limit): - """Get all new events""" + """Get all new events + + Returns all events with from_id < stream_ordering <= current_id. + + Args: + from_id (int): the stream_ordering of the last event we processed + current_id (int): the stream_ordering of the most recently processed event + limit (int): the maximum number of events to return + + Returns: + Deferred[Tuple[int, list[FrozenEvent]]]: A tuple of (next_id, events), where + `next_id` is the next value to pass as `from_id` (it will either be the + stream_ordering of the last returned event, or, if fewer than `limit` events + were found, `current_id`).
+ """ def get_all_new_events_stream_txn(txn): sql = ( -- cgit 1.4.1 From 83e72bb2f0c6ef282190a378941c856afbb33c16 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 12 Oct 2018 11:26:18 +0100 Subject: PR feedback pt. 1 --- synapse/api/errors.py | 8 ------ synapse/handlers/e2e_room_keys.py | 41 ++++++++++++++++--------------- synapse/rest/client/v2_alpha/room_keys.py | 2 +- 3 files changed, 22 insertions(+), 29 deletions(-) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 0a6e78711f..48b903374d 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -348,14 +348,6 @@ class IncompatibleRoomVersionError(SynapseError): ) -def cs_exception(exception): - if isinstance(exception, CodeMessageException): - return exception.error_dict() - else: - logger.error("Unknown exception type: %s", type(exception)) - return {} - - def cs_error(msg, code=Codes.UNKNOWN, **kwargs): """ Utility method for constructing an error response for client-server interactions. diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index bf2a83cc31..4e3141dac8 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2017 New Vector Ltd +# Copyright 2017, 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -49,6 +49,11 @@ class E2eRoomKeysHandler(object): room, or a given session. See EndToEndRoomKeyStore.get_e2e_room_keys for full details. + Args: + user_id(str): the user whose keys we're getting + version(str): the version ID of the backup we're getting keys from + room_id(string): room ID to get keys for, for None to get keys for all rooms + session_id(string): session ID to get keys for, for None to get keys for all sessions Returns: A deferred list of dicts giving the session_data and message metadata for these room keys. @@ -72,6 +77,11 @@ class E2eRoomKeysHandler(object): room or a given session. See EndToEndRoomKeyStore.delete_e2e_room_keys for full details. + Args: + user_id(str): the user whose backup we're deleting + version(str): the version ID of the backup we're deleting + room_id(string): room ID to delete keys for, for None to delete keys for all rooms + session_id(string): session ID to delete keys for, for None to delete keys for all sessions Returns: A deferred of the deletion transaction """ @@ -118,24 +128,24 @@ class E2eRoomKeysHandler(object): # Check that the version we're trying to upload is the current version try: - version_info = yield self._get_version_info_unlocked(user_id) + version_info = yield self.store.get_e2e_room_keys_version_info(user_id) except StoreError as e: if e.code == 404: raise SynapseError(404, "Version '%s' not found" % (version,)) else: - raise e + raise if version_info['version'] != version: # Check that the version we're trying to upload actually exists try: - version_info = yield self._get_version_info_unlocked(user_id, version) + version_info = yield self.store.get_e2e_room_keys_version_info(user_id, version) # if we get this far, the version must exist raise RoomKeysVersionError(current_version=version_info['version']) except StoreError as e: if e.code == 404: raise SynapseError(404, "Version '%s' not found" % (version,)) else: - raise e + raise # go through the room_keys. # XXX: this should/could be done concurrently, given we're in a lock. 
@@ -168,9 +178,9 @@ class E2eRoomKeysHandler(object): if e.code == 404: pass else: - raise e + raise - if E2eRoomKeysHandler._should_replace_room_key(current_room_key, room_key): + if self._should_replace_room_key(current_room_key, room_key): yield self.store.set_e2e_room_key( user_id, version, room_id, session_id, room_key ) @@ -195,14 +205,14 @@ class E2eRoomKeysHandler(object): # purely for legibility. if room_key['is_verified'] and not current_room_key['is_verified']: - pass + return True elif ( room_key['first_message_index'] < current_room_key['first_message_index'] ): - pass + return True elif room_key['forwarded_count'] < current_room_key['forwarded_count']: - pass + return True else: return False return True @@ -256,18 +266,9 @@ class E2eRoomKeysHandler(object): """ with (yield self._upload_linearizer.queue(user_id)): - res = yield self._get_version_info_unlocked(user_id, version) + res = yield self.store.get_e2e_room_keys_version_info(user_id, version) defer.returnValue(res) - @defer.inlineCallbacks - def _get_version_info_unlocked(self, user_id, version=None): - """Get the info about a given version of the user's backup - without obtaining the upload_linearizer lock. For params see get_version_info - """ - - results = yield self.store.get_e2e_room_keys_version_info(user_id, version) - defer.returnValue(results) - @defer.inlineCallbacks def delete_version(self, user_id, version=None): """Deletes a given version of the user's e2e_room_keys backup diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 539893a5d6..4807170ea6 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2017 New Vector Ltd +# Copyright 2017, 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. -- cgit 1.4.1 From 86ef9760a786e7de128120c11d623e3303178505 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 12 Oct 2018 11:35:08 +0100 Subject: Split /room_keys/version into 2 servlets --- synapse/rest/client/v2_alpha/room_keys.py | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 4807170ea6..aee6419179 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -240,9 +240,9 @@ class RoomKeysServlet(RestServlet): defer.returnValue((200, {})) -class RoomKeysVersionServlet(RestServlet): +class RoomKeysNewVersionServlet(RestServlet): PATTERNS = client_v2_patterns( - "/room_keys/version(/(?P[^/]+))?$" + "/room_keys/version$" ) def __init__(self, hs): @@ -250,12 +250,12 @@ class RoomKeysVersionServlet(RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(RoomKeysVersionServlet, self).__init__() + super(RoomKeysNewVersionServlet, self).__init__() self.auth = hs.get_auth() self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() @defer.inlineCallbacks - def on_POST(self, request, version): + def on_POST(self, request): """ Create a new backup version for this user's room_keys with the given info. 
The version is allocated by the server and returned to the user @@ -285,10 +285,6 @@ class RoomKeysVersionServlet(RestServlet): "version": 12345 } """ - - if version: - raise SynapseError(405, "Cannot POST to a specific version") - requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() info = parse_json_object_from_request(request) @@ -301,6 +297,20 @@ class RoomKeysVersionServlet(RestServlet): # we deliberately don't have a PUT /version, as these things really should # be immutable to avoid people footgunning +class RoomKeysVersionServlet(RestServlet): + PATTERNS = client_v2_patterns( + "/room_keys/version(/(?P[^/]+))?$" + ) + + def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ + super(RoomKeysVersionServlet, self).__init__() + self.auth = hs.get_auth() + self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() + @defer.inlineCallbacks def on_GET(self, request, version): """ @@ -320,7 +330,6 @@ class RoomKeysVersionServlet(RestServlet): "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K" } """ - requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() @@ -346,6 +355,8 @@ class RoomKeysVersionServlet(RestServlet): HTTP/1.1 200 OK {} """ + if version is None: + raise SynapseError(400, "No version specified to delete") requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() @@ -359,3 +370,4 @@ class RoomKeysVersionServlet(RestServlet): def register_servlets(hs, http_server): RoomKeysServlet(hs).register(http_server) RoomKeysVersionServlet(hs).register(http_server) + RoomKeysNewVersionServlet(hs).register(http_server) -- cgit 1.4.1 From bddfad253a2ca8ea0645d10e89055c4ce2ff1c38 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 12 Oct 2018 11:48:02 +0100 Subject: Don't mangle exceptions --- synapse/rest/client/v2_alpha/room_keys.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index aee6419179..8d006d819d 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -339,9 +339,7 @@ class RoomKeysVersionServlet(RestServlet): ) except SynapseError as e: if e.code == 404: - e.errcode = Codes.NOT_FOUND - e.msg = "No backup found" - raise e + raise SynapseError(404, "No backup found", Codes.NOT_FOUND) defer.returnValue((200, info)) @defer.inlineCallbacks @@ -356,7 +354,7 @@ class RoomKeysVersionServlet(RestServlet): {} """ if version is None: - raise SynapseError(400, "No version specified to delete") + raise SynapseError(400, "No version specified to delete", Codes.NOT_FOUND) requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() -- cgit 1.4.1 From 306361b31bc0fb229de8db29209b9da38374d6f2 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 12 Oct 2018 11:48:56 +0100 Subject: Misc PR feedback bits --- synapse/storage/e2e_room_keys.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 4d439bb164..f25ded2295 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import simplejson as json +import json from twisted.internet import defer @@ -76,7 +76,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): session_id(str): the session whose room_key we're setting room_key(dict): the room_key being set Raises: - StoreError if stuff goes wrong, probably + StoreError """ yield self._simple_upsert( -- cgit 1.4.1 From 8c0ff0287af19cbae40fe7dd413f1e50578908c3 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 12 Oct 2018 13:47:43 +0100 Subject: Linting soothes the savage PEP8 monster --- synapse/handlers/e2e_room_keys.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 4e3141dac8..5edb3cfe04 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -53,7 +53,8 @@ class E2eRoomKeysHandler(object): user_id(str): the user whose keys we're getting version(str): the version ID of the backup we're getting keys from room_id(string): room ID to get keys for, for None to get keys for all rooms - session_id(string): session ID to get keys for, for None to get keys for all sessions + session_id(string): session ID to get keys for, for None to get keys for all + sessions Returns: A deferred list of dicts giving the session_data and message metadata for these room keys. @@ -80,8 +81,10 @@ class E2eRoomKeysHandler(object): Args: user_id(str): the user whose backup we're deleting version(str): the version ID of the backup we're deleting - room_id(string): room ID to delete keys for, for None to delete keys for all rooms - session_id(string): session ID to delete keys for, for None to delete keys for all sessions + room_id(string): room ID to delete keys for, for None to delete keys for all + rooms + session_id(string): session ID to delete keys for, for None to delete keys + for all sessions Returns: A deferred of the deletion transaction """ @@ -138,7 +141,9 @@ class E2eRoomKeysHandler(object): if version_info['version'] != version: # Check that the version we're trying to upload actually exists try: - version_info = yield self.store.get_e2e_room_keys_version_info(user_id, version) + version_info = yield self.store.get_e2e_room_keys_version_info( + user_id, version, + ) # if we get this far, the version must exist raise RoomKeysVersionError(current_version=version_info['version']) except StoreError as e: -- cgit 1.4.1 From 67a1e315cc7f8b270ec87e0ab6e58aa2adaee32d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 12 Oct 2018 13:49:48 +0100 Subject: Fix up comments --- synapse/storage/state.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 3f08f447a3..f7cf5c86c9 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1044,9 +1044,9 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): def _is_state_group_referenced(self, txn, state_group): """Checks if a given state group is referenced, or is safe to delete. - A state groups is referenced if it or any of its descendants are - pointed at by an event. (A descendant is a group which has the given - state_group as a prev group) + A state group is referenced if it or any of its descendants are + pointed at by an event. (A descendant is a state_group whose chain of + prev_groups includes the given state_group.) 
""" # We check this by doing a depth first search to look for any -- cgit 1.4.1 From 381d2cfdf0f02935b743f4b6dc1b5133d7ed27b7 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Sat, 13 Oct 2018 00:14:08 +1100 Subject: Make workers work on Py3 (#4027) --- changelog.d/4027.bugfix | 1 + synapse/app/_base.py | 30 ++++++++++++------------ synapse/app/event_creator.py | 3 +++ synapse/app/pusher.py | 15 ++++++------ synapse/app/synchrotron.py | 12 +++++----- synapse/python_dependencies.py | 3 --- synapse/replication/slave/storage/_base.py | 9 +++++++ synapse/replication/slave/storage/deviceinbox.py | 12 +++++----- synapse/replication/slave/storage/devices.py | 11 +-------- synapse/replication/slave/storage/groups.py | 8 +++---- synapse/replication/slave/storage/keys.py | 14 +++++------ synapse/replication/slave/storage/presence.py | 6 ++--- synctl | 2 +- 13 files changed, 64 insertions(+), 62 deletions(-) create mode 100644 changelog.d/4027.bugfix diff --git a/changelog.d/4027.bugfix b/changelog.d/4027.bugfix new file mode 100644 index 0000000000..c9bbff3f68 --- /dev/null +++ b/changelog.d/4027.bugfix @@ -0,0 +1 @@ +Workers now start on Python 3. diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 7c866e246a..18584226e9 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -17,6 +17,7 @@ import gc import logging import sys +import psutil from daemonize import Daemonize from twisted.internet import error, reactor @@ -24,12 +25,6 @@ from twisted.internet import error, reactor from synapse.util import PreserveLoggingContext from synapse.util.rlimit import change_resource_limit -try: - import affinity -except Exception: - affinity = None - - logger = logging.getLogger(__name__) @@ -89,15 +84,20 @@ def start_reactor( with PreserveLoggingContext(): logger.info("Running") if cpu_affinity is not None: - if not affinity: - quit_with_error( - "Missing package 'affinity' required for cpu_affinity\n" - "option\n\n" - "Install by running:\n\n" - " pip install affinity\n\n" - ) - logger.info("Setting CPU affinity to %s" % cpu_affinity) - affinity.set_process_affinity_mask(0, cpu_affinity) + # Turn the bitmask into bits, reverse it so we go from 0 up + mask_to_bits = bin(cpu_affinity)[2:][::-1] + + cpus = [] + cpu_num = 0 + + for i in mask_to_bits: + if i == "1": + cpus.append(cpu_num) + cpu_num += 1 + + p = psutil.Process() + p.cpu_affinity(cpus) + change_resource_limit(soft_file_limit) if gc_thresholds: gc.set_threshold(*gc_thresholds) diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index 9060ab14f6..e4a68715aa 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -178,6 +178,9 @@ def start(config_options): setup_logging(config, use_worker_options=True) + # This should only be done on the user directory worker or the master + config.update_user_directory = False + events.USE_FROZEN_DICTS = config.use_frozen_dicts database_engine = create_engine(config.database_config) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 630dcda478..0f9f8e19f6 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -28,6 +28,7 @@ from synapse.config.logger import setup_logging from synapse.http.site import SynapseSite from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource +from synapse.replication.slave.storage._base import __func__ from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.events import SlavedEventStore 
from synapse.replication.slave.storage.pushers import SlavedPusherStore @@ -49,31 +50,31 @@ class PusherSlaveStore( SlavedAccountDataStore ): update_pusher_last_stream_ordering_and_success = ( - DataStore.update_pusher_last_stream_ordering_and_success.__func__ + __func__(DataStore.update_pusher_last_stream_ordering_and_success) ) update_pusher_failing_since = ( - DataStore.update_pusher_failing_since.__func__ + __func__(DataStore.update_pusher_failing_since) ) update_pusher_last_stream_ordering = ( - DataStore.update_pusher_last_stream_ordering.__func__ + __func__(DataStore.update_pusher_last_stream_ordering) ) get_throttle_params_by_room = ( - DataStore.get_throttle_params_by_room.__func__ + __func__(DataStore.get_throttle_params_by_room) ) set_throttle_params = ( - DataStore.set_throttle_params.__func__ + __func__(DataStore.set_throttle_params) ) get_time_of_last_push_action_before = ( - DataStore.get_time_of_last_push_action_before.__func__ + __func__(DataStore.get_time_of_last_push_action_before) ) get_profile_displayname = ( - DataStore.get_profile_displayname.__func__ + __func__(DataStore.get_profile_displayname) ) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 9a7fc6ee9d..3926c7f263 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -33,7 +33,7 @@ from synapse.http.server import JsonResource from synapse.http.site import SynapseSite from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource -from synapse.replication.slave.storage._base import BaseSlavedStore +from synapse.replication.slave.storage._base import BaseSlavedStore, __func__ from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore from synapse.replication.slave.storage.client_ips import SlavedClientIpStore @@ -147,7 +147,7 @@ class SynchrotronPresence(object): and haven't come back yet. If there are poke the master about them. """ now = self.clock.time_msec() - for user_id, last_sync_ms in self.users_going_offline.items(): + for user_id, last_sync_ms in list(self.users_going_offline.items()): if now - last_sync_ms > 10 * 1000: self.users_going_offline.pop(user_id, None) self.send_user_sync(user_id, False, last_sync_ms) @@ -156,9 +156,9 @@ class SynchrotronPresence(object): # TODO Hows this supposed to work? 
pass - get_states = PresenceHandler.get_states.__func__ - get_state = PresenceHandler.get_state.__func__ - current_state_for_users = PresenceHandler.current_state_for_users.__func__ + get_states = __func__(PresenceHandler.get_states) + get_state = __func__(PresenceHandler.get_state) + current_state_for_users = __func__(PresenceHandler.current_state_for_users) def user_syncing(self, user_id, affect_presence): if affect_presence: @@ -208,7 +208,7 @@ class SynchrotronPresence(object): ) for row in rows] for state in states: - self.user_to_current_state[row.user_id] = state + self.user_to_current_state[state.user_id] = state stream_id = token yield self.notify_from_replication(states, stream_id) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index d4d983b00a..2947f37f1a 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -82,9 +82,6 @@ CONDITIONAL_REQUIREMENTS = { "psutil": { "psutil>=2.0.0": ["psutil>=2.0.0"], }, - "affinity": { - "affinity": ["affinity"], - }, "postgres": { "psycopg2>=2.6": ["psycopg2"] } diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py index 3f7be74e02..2d81d49e9a 100644 --- a/synapse/replication/slave/storage/_base.py +++ b/synapse/replication/slave/storage/_base.py @@ -15,6 +15,8 @@ import logging +import six + from synapse.storage._base import SQLBaseStore from synapse.storage.engines import PostgresEngine @@ -23,6 +25,13 @@ from ._slaved_id_tracker import SlavedIdTracker logger = logging.getLogger(__name__) +def __func__(inp): + if six.PY3: + return inp + else: + return inp.__func__ + + class BaseSlavedStore(SQLBaseStore): def __init__(self, db_conn, hs): super(BaseSlavedStore, self).__init__(db_conn, hs) diff --git a/synapse/replication/slave/storage/deviceinbox.py b/synapse/replication/slave/storage/deviceinbox.py index 87eaa53004..4f19fd35aa 100644 --- a/synapse/replication/slave/storage/deviceinbox.py +++ b/synapse/replication/slave/storage/deviceinbox.py @@ -17,7 +17,7 @@ from synapse.storage import DataStore from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.caches.stream_change_cache import StreamChangeCache -from ._base import BaseSlavedStore +from ._base import BaseSlavedStore, __func__ from ._slaved_id_tracker import SlavedIdTracker @@ -43,11 +43,11 @@ class SlavedDeviceInboxStore(BaseSlavedStore): expiry_ms=30 * 60 * 1000, ) - get_to_device_stream_token = DataStore.get_to_device_stream_token.__func__ - get_new_messages_for_device = DataStore.get_new_messages_for_device.__func__ - get_new_device_msgs_for_remote = DataStore.get_new_device_msgs_for_remote.__func__ - delete_messages_for_device = DataStore.delete_messages_for_device.__func__ - delete_device_msgs_for_remote = DataStore.delete_device_msgs_for_remote.__func__ + get_to_device_stream_token = __func__(DataStore.get_to_device_stream_token) + get_new_messages_for_device = __func__(DataStore.get_new_messages_for_device) + get_new_device_msgs_for_remote = __func__(DataStore.get_new_device_msgs_for_remote) + delete_messages_for_device = __func__(DataStore.delete_messages_for_device) + delete_device_msgs_for_remote = __func__(DataStore.delete_device_msgs_for_remote) def stream_positions(self): result = super(SlavedDeviceInboxStore, self).stream_positions() diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py index 21b8c468fa..ec2fd561cc 100644 --- a/synapse/replication/slave/storage/devices.py +++ 
b/synapse/replication/slave/storage/devices.py @@ -13,23 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six - from synapse.storage import DataStore from synapse.storage.end_to_end_keys import EndToEndKeyStore from synapse.util.caches.stream_change_cache import StreamChangeCache -from ._base import BaseSlavedStore +from ._base import BaseSlavedStore, __func__ from ._slaved_id_tracker import SlavedIdTracker -def __func__(inp): - if six.PY3: - return inp - else: - return inp.__func__ - - class SlavedDeviceStore(BaseSlavedStore): def __init__(self, db_conn, hs): super(SlavedDeviceStore, self).__init__(db_conn, hs) diff --git a/synapse/replication/slave/storage/groups.py b/synapse/replication/slave/storage/groups.py index 5777f07c8d..e933b170bb 100644 --- a/synapse/replication/slave/storage/groups.py +++ b/synapse/replication/slave/storage/groups.py @@ -16,7 +16,7 @@ from synapse.storage import DataStore from synapse.util.caches.stream_change_cache import StreamChangeCache -from ._base import BaseSlavedStore +from ._base import BaseSlavedStore, __func__ from ._slaved_id_tracker import SlavedIdTracker @@ -33,9 +33,9 @@ class SlavedGroupServerStore(BaseSlavedStore): "_group_updates_stream_cache", self._group_updates_id_gen.get_current_token(), ) - get_groups_changes_for_user = DataStore.get_groups_changes_for_user.__func__ - get_group_stream_token = DataStore.get_group_stream_token.__func__ - get_all_groups_for_user = DataStore.get_all_groups_for_user.__func__ + get_groups_changes_for_user = __func__(DataStore.get_groups_changes_for_user) + get_group_stream_token = __func__(DataStore.get_group_stream_token) + get_all_groups_for_user = __func__(DataStore.get_all_groups_for_user) def stream_positions(self): result = super(SlavedGroupServerStore, self).stream_positions() diff --git a/synapse/replication/slave/storage/keys.py b/synapse/replication/slave/storage/keys.py index 05ed168463..8032f53fec 100644 --- a/synapse/replication/slave/storage/keys.py +++ b/synapse/replication/slave/storage/keys.py @@ -16,7 +16,7 @@ from synapse.storage import DataStore from synapse.storage.keys import KeyStore -from ._base import BaseSlavedStore +from ._base import BaseSlavedStore, __func__ class SlavedKeyStore(BaseSlavedStore): @@ -24,11 +24,11 @@ class SlavedKeyStore(BaseSlavedStore): "_get_server_verify_key" ] - get_server_verify_keys = DataStore.get_server_verify_keys.__func__ - store_server_verify_key = DataStore.store_server_verify_key.__func__ + get_server_verify_keys = __func__(DataStore.get_server_verify_keys) + store_server_verify_key = __func__(DataStore.store_server_verify_key) - get_server_certificate = DataStore.get_server_certificate.__func__ - store_server_certificate = DataStore.store_server_certificate.__func__ + get_server_certificate = __func__(DataStore.get_server_certificate) + store_server_certificate = __func__(DataStore.store_server_certificate) - get_server_keys_json = DataStore.get_server_keys_json.__func__ - store_server_keys_json = DataStore.store_server_keys_json.__func__ + get_server_keys_json = __func__(DataStore.get_server_keys_json) + store_server_keys_json = __func__(DataStore.store_server_keys_json) diff --git a/synapse/replication/slave/storage/presence.py b/synapse/replication/slave/storage/presence.py index 80b744082a..92447b00d4 100644 --- a/synapse/replication/slave/storage/presence.py +++ b/synapse/replication/slave/storage/presence.py @@ -17,7 +17,7 @@ from synapse.storage import DataStore from 
synapse.storage.presence import PresenceStore from synapse.util.caches.stream_change_cache import StreamChangeCache -from ._base import BaseSlavedStore +from ._base import BaseSlavedStore, __func__ from ._slaved_id_tracker import SlavedIdTracker @@ -34,8 +34,8 @@ class SlavedPresenceStore(BaseSlavedStore): "PresenceStreamChangeCache", self._presence_id_gen.get_current_token() ) - _get_active_presence = DataStore._get_active_presence.__func__ - take_presence_startup_info = DataStore.take_presence_startup_info.__func__ + _get_active_presence = __func__(DataStore._get_active_presence) + take_presence_startup_info = __func__(DataStore.take_presence_startup_info) _get_presence_for_user = PresenceStore.__dict__["_get_presence_for_user"] get_presence_for_users = PresenceStore.__dict__["get_presence_for_users"] diff --git a/synctl b/synctl index 356e5cb6a7..09b64459b1 100755 --- a/synctl +++ b/synctl @@ -280,7 +280,7 @@ def main(): if worker.cache_factor: os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor) - for cache_name, factor in worker.cache_factors.iteritems(): + for cache_name, factor in iteritems(worker.cache_factors): os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor) start_worker(worker.app, configfile, worker.configfile) -- cgit 1.4.1 From a45f2c3a002dd50ba4dc6dcbe45a51f2da4057b2 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 12 Oct 2018 14:33:55 +0100 Subject: missed one --- synapse/rest/client/v2_alpha/room_keys.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 8d006d819d..45b5817d8b 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -297,6 +297,7 @@ class RoomKeysNewVersionServlet(RestServlet): # we deliberately don't have a PUT /version, as these things really should # be immutable to avoid people footgunning + class RoomKeysVersionServlet(RestServlet): PATTERNS = client_v2_patterns( "/room_keys/version(/(?P[^/]+))?$" -- cgit 1.4.1 From a2bfb778c8a2a91bdff1a5824571d91f4f4536d3 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Fri, 12 Oct 2018 18:17:36 +0100 Subject: improve auto room join logic, comments and tests --- changelog.d/3975.feature | 2 +- synapse/config/registration.py | 11 ++++++++++- synapse/handlers/register.py | 11 ++++++++--- tests/handlers/test_register.py | 21 +++++++++++++++++---- 4 files changed, 36 insertions(+), 9 deletions(-) diff --git a/changelog.d/3975.feature b/changelog.d/3975.feature index 5cd8ad6cca..496ba4f4a0 100644 --- a/changelog.d/3975.feature +++ b/changelog.d/3975.feature @@ -1 +1 @@ -First user should autocreate autojoin rooms +Servers with auto-join rooms should autocreate those rooms when the first user registers diff --git a/synapse/config/registration.py b/synapse/config/registration.py index dcf2374ed2..43ff20a637 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -15,6 +15,8 @@ from distutils.util import strtobool +from synapse.config._base import ConfigError +from synapse.types import RoomAlias from synapse.util.stringutils import random_string_with_symbols from ._base import Config @@ -44,6 +46,9 @@ class RegistrationConfig(Config): ) self.auto_join_rooms = config.get("auto_join_rooms", []) + for room_alias in self.auto_join_rooms: + if not RoomAlias.is_valid(room_alias): + raise ConfigError('Invalid auto_join_rooms entry %s' % room_alias) self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True) def
default_config(self, **kwargs): @@ -100,7 +105,11 @@ class RegistrationConfig(Config): #auto_join_rooms: # - "#example:example.com" - # Have first user on server autocreate autojoin rooms + # Where auto_join_rooms are specified, setting this flag ensures that the + # the rooms exists by creating them when the first user on the + # homeserver registers. + # Setting to false means that if the rooms are not manually created, + # users cannot be auto joined since they do not exist. autocreate_auto_join_rooms: true """ % locals() diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 01cf7ab58e..2b269ab692 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -26,6 +26,7 @@ from synapse.api.errors import ( RegistrationError, SynapseError, ) +from synapse.config._base import ConfigError from synapse.http.client import CaptchaServerHttpClient from synapse.types import RoomAlias, RoomID, UserID, create_requester from synapse.util.async_helpers import Linearizer @@ -222,14 +223,19 @@ class RegistrationHandler(BaseHandler): fake_requester = create_requester(user_id) # try to create the room if we're the first user on the server + should_auto_create_rooms = False if self.hs.config.autocreate_auto_join_rooms: count = yield self.store.count_all_users() - auto_create_rooms = count == 1 + should_auto_create_rooms = count == 1 for r in self.hs.config.auto_join_rooms: try: - if auto_create_rooms and RoomAlias.is_valid(r): + if should_auto_create_rooms: room_creation_handler = self.hs.get_room_creation_handler() + if self.hs.hostname != RoomAlias.from_string(r).domain: + raise ConfigError( + 'Cannot create room alias %s, it does not match server domain' + ) # create room expects the localpart of the room alias room_alias_localpart = RoomAlias.from_string(r).localpart yield room_creation_handler.create_room( @@ -531,7 +537,6 @@ class RegistrationHandler(BaseHandler): @defer.inlineCallbacks def _join_user_to_room(self, requester, room_identifier): - room_id = None room_member_handler = self.hs.get_room_member_handler() if RoomID.is_valid(room_identifier): diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index a150a897ac..3e9a190727 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -47,7 +47,6 @@ class RegistrationTestCase(unittest.TestCase): generate_access_token=Mock(return_value='secret') ) self.hs.get_macaroon_generator = Mock(return_value=self.macaroon_generator) - # self.hs.handlers = RegistrationHandlers(self.hs) self.handler = self.hs.get_handlers().registration_handler self.store = self.hs.get_datastore() self.hs.config.max_mau_value = 50 @@ -148,9 +147,7 @@ class RegistrationTestCase(unittest.TestCase): @defer.inlineCallbacks def test_auto_create_auto_join_rooms(self): room_alias_str = "#room:test" - self.hs.config.autocreate_auto_join_rooms = True self.hs.config.auto_join_rooms = [room_alias_str] - res = yield self.handler.register(localpart='jeff') rooms = yield self.store.get_rooms_for_user(res[0]) @@ -163,11 +160,27 @@ class RegistrationTestCase(unittest.TestCase): @defer.inlineCallbacks def test_auto_create_auto_join_rooms_with_no_rooms(self): - self.hs.config.autocreate_auto_join_rooms = True self.hs.config.auto_join_rooms = [] frank = UserID.from_string("@frank:test") res = yield self.handler.register(frank.localpart) self.assertEqual(res[0], frank.to_string()) rooms = yield self.store.get_rooms_for_user(res[0]) + self.assertEqual(len(rooms), 0) + + @defer.inlineCallbacks + def 
test_auto_create_auto_join_where_room_is_another_domain(self): + self.hs.config.auto_join_rooms = ["#room:another"] + frank = UserID.from_string("@frank:test") + res = yield self.handler.register(frank.localpart) + self.assertEqual(res[0], frank.to_string()) + rooms = yield self.store.get_rooms_for_user(res[0]) + self.assertEqual(len(rooms), 0) + @defer.inlineCallbacks + def test_auto_create_auto_join_where_auto_create_is_false(self): + self.hs.config.autocreate_auto_join_rooms = False + room_alias_str = "#room:test" + self.hs.config.auto_join_rooms = [room_alias_str] + res = yield self.handler.register(localpart='jeff') + rooms = yield self.store.get_rooms_for_user(res[0]) self.assertEqual(len(rooms), 0) -- cgit 1.4.1 From 7bb651de6a29e4b261b67aace1e0d5e4286b2e69 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Fri, 12 Oct 2018 14:53:30 -0600 Subject: More sane handling of group errors and pep8 --- synapse/handlers/groups_local.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 68de8a8e53..66da6edad1 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -20,7 +20,7 @@ from six import iteritems from twisted.internet import defer -from synapse.api.errors import (SynapseError, HttpResponseException) +from synapse.api.errors import (HttpResponseException, SynapseError) from synapse.types import get_domain_from_id logger = logging.getLogger(__name__) @@ -40,12 +40,13 @@ def _create_rerouter(func_name): d = getattr(self.transport_client, func_name)( destination, group_id, *args, **kwargs ) + def h(failure): failure.trap(HttpResponseException) e = failure.value - if e.code >= 400 and e.code < 500: - raise SynapseError(e.code, e.msg) - failure.raiseException() + if e.code == 403: + raise e.to_synapse_error() + return failure d.addErrback(h) return d return f -- cgit 1.4.1 From 164f8e484356f0dc8bc32f30f668d4b69b46000b Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Fri, 12 Oct 2018 15:11:59 -0600 Subject: isort --- synapse/handlers/groups_local.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 66da6edad1..cb7e7d49cb 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -20,7 +20,7 @@ from six import iteritems from twisted.internet import defer -from synapse.api.errors import (HttpResponseException, SynapseError) +from synapse.api.errors import HttpResponseException, SynapseError from synapse.types import get_domain_from_id logger = logging.getLogger(__name__) -- cgit 1.4.1 From 22a20044280d6c0a16ad9e94baf486046d536e5c Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Fri, 12 Oct 2018 17:53:14 -0600 Subject: Update documentation and templates for new consent --- docs/consent_tracking.md | 13 +++++++++---- docs/privacy_policy_templates/en/1.0.html | 15 +++++++++------ 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/docs/consent_tracking.md b/docs/consent_tracking.md index 064eae82f7..3634d13d4f 100644 --- a/docs/consent_tracking.md +++ b/docs/consent_tracking.md @@ -31,7 +31,7 @@ Note that the templates must be stored under a name giving the language of the template - currently this must always be `en` (for "English"); internationalisation support is intended for the future. 
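(As an aside, a rough sketch of loading one of these versioned, per-language templates with Jinja2; this is not Synapse's actual loader, and the function name and `template_dir` argument are illustrative:)

    import os
    from jinja2 import Environment, FileSystemLoader

    def load_policy_template(template_dir, version, language="en"):
        # Templates live under <template_dir>/<language>/<version>.html,
        # e.g. privacy/en/1.0.html; the language is always "en" for now,
        # per the note above.
        env = Environment(loader=FileSystemLoader(os.path.join(template_dir, language)))
        return env.get_template("%s.html" % (version,))
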
-The template for the policy itself should be versioned and named according to +The template for the policy itself should be versioned and named according to the version: for example `1.0.html`. The version of the policy which the user has agreed to is stored in the database. @@ -81,9 +81,9 @@ should be a matter of `pip install Jinja2`. On debian, try `apt-get install python-jinja2`. Once this is complete, and the server has been restarted, try visiting -`https:///_matrix/consent`. If correctly configured, this should give -an error "Missing string query parameter 'u'". It is now possible to manually -construct URIs where users can give their consent. +`https:///_matrix/consent`. If correctly configured, you should see a +default policy document. It is now possible to manually construct URIs where +users can give their consent. ### Constructing the consent URI @@ -106,6 +106,11 @@ query parameters: `https:///_matrix/consent?u=&h=68a152465a4d...`. +Note that not providing a `u` parameter will be interpreted as wanting to view +the document from an unauthenticated perspective, such as prior to registration. +Therefore, the `h` parameter is not required in this scenario. + + Sending users a server notice asking them to agree to the policy ---------------------------------------------------------------- diff --git a/docs/privacy_policy_templates/en/1.0.html b/docs/privacy_policy_templates/en/1.0.html index 55c5e4b612..321c7e4671 100644 --- a/docs/privacy_policy_templates/en/1.0.html +++ b/docs/privacy_policy_templates/en/1.0.html @@ -12,12 +12,15 @@

    All your base are belong to us.

-    <form method="post" action="consent">
-      <input type="hidden" name="v" value="{{version}}"/>
-      <input type="hidden" name="u" value="{{user}}"/>
-      <input type="hidden" name="h" value="{{userhmac}}"/>
-      <input type="submit" value="Sure thing!"/>
-    </form>
+    {% if not public_version %}
+      <!-- The variables used here are only provided when the 'u' param is given to the homeserver -->
+      <form method="post" action="consent">
+        <input type="hidden" name="v" value="{{version}}"/>
+        <input type="hidden" name="u" value="{{user}}"/>
+        <input type="hidden" name="h" value="{{userhmac}}"/>
+        <input type="submit" value="Sure thing!"/>
+      </form>
+ {% endif %} {% endif %} -- cgit 1.4.1 From 5119818e9d7dac97854868af102476df57f599e5 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Fri, 12 Oct 2018 17:54:28 -0600 Subject: Rely on the lack of ?u to represent public access also general cleanup --- synapse/rest/client/v2_alpha/auth.py | 4 ++-- synapse/rest/consent/consent_resource.py | 12 ++++-------- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index 77a5ea66f3..ec583ad16a 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -164,7 +164,7 @@ class AuthRestServlet(RestServlet): html = TERMS_TEMPLATE % { 'session': session, - 'terms_url': "%s/_matrix/consent?public=true" % ( + 'terms_url': "%s/_matrix/consent" % ( self.hs.config.public_baseurl, ), 'myurl': "%s/auth/%s/fallback/web" % ( @@ -244,7 +244,7 @@ class AuthRestServlet(RestServlet): else: html = TERMS_TEMPLATE % { 'session': session, - 'terms_url': "%s/_matrix/consent?public=true" % ( + 'terms_url': "%s/_matrix/consent" % ( self.hs.config.public_baseurl, ), 'myurl': "%s/auth/%s/fallback/web" % ( diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py index 7a5786f164..4cadd71d7e 100644 --- a/synapse/rest/consent/consent_resource.py +++ b/synapse/rest/consent/consent_resource.py @@ -30,7 +30,7 @@ from twisted.web.server import NOT_DONE_YET from synapse.api.errors import NotFoundError, StoreError, SynapseError from synapse.config import ConfigError from synapse.http.server import finish_request, wrap_html_request_handler -from synapse.http.servlet import parse_string, parse_boolean +from synapse.http.servlet import parse_string from synapse.types import UserID # language to use for the templates. 
TODO: figure this out from Accept-Language
@@ -137,16 +137,12 @@ class ConsentResource(Resource):
            request (twisted.web.http.Request):
        """
-        public_version = parse_boolean(request, "public", default=False)
-
-        version = self._default_consent_version
-        username = None
+        version = parse_string(request, "v", default=self._default_consent_version)
+        username = parse_string(request, "u", required=False, default="")
        userhmac = None
        has_consented = False
+        public_version = username == ""
        if not public_version:
-            version = parse_string(request, "v",
-                default=self._default_consent_version)
-            username = parse_string(request, "u", required=True)
            userhmac = parse_string(request, "h", required=True, encoding=None)
            self._check_hash(username, userhmac)
-- cgit 1.4.1
From dd99db846d76d511fc7bbea80897b9101782ec1f Mon Sep 17 00:00:00 2001
From: Travis Ralston
Date: Fri, 12 Oct 2018 18:03:27 -0600
Subject: Update login terms structure for the proposed language support
---
synapse/handlers/auth.py | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 42d1336d6e..9038fee264 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -468,11 +468,14 @@ class AuthHandler(BaseHandler):
    def _get_params_terms(self):
        return {
-            "policies": [{
-                "name": "Privacy Policy",
+            "policies": {
+                "privacy_policy": {
                "version": self.hs.config.user_consent_version,
-                "url": "%s/_matrix/consent?public=true" % (self.hs.config.public_baseurl,),
-            }],
+                "en": {
+                    "name": "Privacy Policy",
+                    "url": "%s/_matrix/consent" % (self.hs.config.public_baseurl,),
+                },
+            },
        }
    def _auth_dict_for_flows(self, flows, session):
-- cgit 1.4.1
From 1ccafb0c5e3e6c9c64ce231163e7c73f05b910fa Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Sat, 13 Oct 2018 21:14:21 +0100
Subject: no need to join room if creator
---
synapse/handlers/register.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 2b269ab692..2f7bdb0a20 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -246,7 +246,8 @@ class RegistrationHandler(BaseHandler):
                        },
                        ratelimit=False,
                    )
-                    yield self._join_user_to_room(fake_requester, r)
+                else:
+                    yield self._join_user_to_room(fake_requester, r)
            except Exception as e:
                logger.error("Failed to join new user to %r: %r", r, e)
-- cgit 1.4.1
From fb216a22dbceebeee59aedfc6f888b73f4492908 Mon Sep 17 00:00:00 2001
From: Ivan Shapovalov
Date: Sun, 14 Oct 2018 11:37:04 +0300
Subject: synapse/visibility.py: fix SyntaxError on py3.7
---
changelog.d/4033.bugfix | 1 +
synapse/visibility.py | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/4033.bugfix
diff --git a/changelog.d/4033.bugfix b/changelog.d/4033.bugfix
new file mode 100644
index 0000000000..8f7a0bbbe9
--- /dev/null
+++ b/changelog.d/4033.bugfix
@@ -0,0 +1 @@
+Synapse now starts on Python 3.7.
diff --git a/synapse/visibility.py b/synapse/visibility.py
index c64ad2144c..43f48196be 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -219,7 +219,7 @@ def filter_events_for_server(store, server_name, events):
    # Whatever else we do, we need to check for senders which have requested
    # erasure of their data.
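    # (Background note, not part of the original patch: Python 3.7 rejects an
    # unparenthesised generator expression that is followed by a trailing
    # comma, since the comma means the generator is no longer the sole
    # argument; hence the parentheses added in the hunk below.)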
    erased_senders = yield store.are_users_erased(
-        e.sender for e in events,
+        (e.sender for e in events),
    )
    def redact_disallowed(event, state):
-- cgit 1.4.1
From 06bc8d2fe551b4390cbca9838bc547c5d0021f6d Mon Sep 17 00:00:00 2001
From: Ivan Shapovalov
Date: Sun, 14 Oct 2018 11:37:38 +0300
Subject: synapse/app: frontend_proxy.py: actually make workers work on py3
---
changelog.d/4033.bugfix | 1 +
synapse/app/frontend_proxy.py | 4 ++--
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/changelog.d/4033.bugfix b/changelog.d/4033.bugfix
index 8f7a0bbbe9..4e9e38cdcf 100644
--- a/changelog.d/4033.bugfix
+++ b/changelog.d/4033.bugfix
@@ -1 +1,2 @@
Synapse now starts on Python 3.7.
+_All_ workers now start on Python 3.
diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py
index fc4b25de1c..f5c61dec5b 100644
--- a/synapse/app/frontend_proxy.py
+++ b/synapse/app/frontend_proxy.py
@@ -68,7 +68,7 @@ class PresenceStatusStubServlet(ClientV1RestServlet):
            "Authorization": auth_headers,
        }
        result = yield self.http_client.get_json(
-            self.main_uri + request.uri,
+            self.main_uri + request.uri.decode('ascii'),
            headers=headers,
        )
        defer.returnValue((200, result))
@@ -125,7 +125,7 @@ class KeyUploadServlet(RestServlet):
            "Authorization": auth_headers,
        }
        result = yield self.http_client.post_json_get_json(
-            self.main_uri + request.uri,
+            self.main_uri + request.uri.decode('ascii'),
            body,
            headers=headers,
        )
-- cgit 1.4.1
From c187638ee9ccd9d87b5698d031d07aa7f0c84117 Mon Sep 17 00:00:00 2001
From: Slavi Pantaleev
Date: Sun, 14 Oct 2018 21:50:18 +0300
Subject: Add information about the matrix-docker-ansible-deploy playbook
Signed-off-by: Slavi Pantaleev
---
README.rst | 6 ++++++
changelog.d/3698.misc | 1 +
2 files changed, 7 insertions(+)
create mode 100644 changelog.d/3698.misc
diff --git a/README.rst b/README.rst
index e1ea351f84..456a3d9d43 100644
--- a/README.rst
+++ b/README.rst
@@ -174,6 +174,12 @@ Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
Dockerfile to automate a synapse server in a single Docker image, at
https://hub.docker.com/r/avhost/docker-matrix/tags/
+Slavi Pantaleev has created an Ansible playbook,
+which installs the official Docker image of Matrix Synapse
+along with many other Matrix-related services (Postgres database, riot-web, coturn, mxisd, SSL support, etc.).
+For more details, see +https://github.com/spantaleev/matrix-docker-ansible-deploy + Configuring Synapse ------------------- diff --git a/changelog.d/3698.misc b/changelog.d/3698.misc new file mode 100644 index 0000000000..12537e76f2 --- /dev/null +++ b/changelog.d/3698.misc @@ -0,0 +1 @@ +Add information about the [matrix-docker-ansible-deploy](https://github.com/spantaleev/matrix-docker-ansible-deploy) playbook -- cgit 1.4.1 From f726f2dc6c398eb5fdf9d2dc0c56e96d930537ba Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 15 Oct 2018 22:21:45 +1100 Subject: version bump --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index 43c5821ade..ea07fda14a 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.33.6" +__version__ = "0.33.7rc1" -- cgit 1.4.1 From 4e50fe3edb94125d496f2f77da0d653054e3ac3c Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 15 Oct 2018 22:22:49 +1100 Subject: update changelog --- changelog.d/4033.bugfix | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/changelog.d/4033.bugfix b/changelog.d/4033.bugfix index 4e9e38cdcf..05d54d9524 100644 --- a/changelog.d/4033.bugfix +++ b/changelog.d/4033.bugfix @@ -1,2 +1 @@ -Synapse now starts on Python 3.7. -_All_ workers now start on Python 3. +Synapse now starts on Python 3.7. \ No newline at end of file -- cgit 1.4.1 From 24bc15eab46992faff9976ddfe9b15c611eab600 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 15 Oct 2018 22:23:13 +1100 Subject: update changelog --- CHANGES.md | 28 ++++++++++++++++++++++++++++ changelog.d/3995.bugfix | 1 - changelog.d/3996.bugfix | 1 - changelog.d/3997.bugfix | 1 - changelog.d/3999.bugfix | 1 - changelog.d/4008.misc | 1 - changelog.d/4017.misc | 1 - changelog.d/4019.feature | 1 - changelog.d/4022.misc | 1 - changelog.d/4027.bugfix | 1 - changelog.d/4033.bugfix | 1 - 11 files changed, 28 insertions(+), 10 deletions(-) delete mode 100644 changelog.d/3995.bugfix delete mode 100644 changelog.d/3996.bugfix delete mode 100644 changelog.d/3997.bugfix delete mode 100644 changelog.d/3999.bugfix delete mode 100644 changelog.d/4008.misc delete mode 100644 changelog.d/4017.misc delete mode 100644 changelog.d/4019.feature delete mode 100644 changelog.d/4022.misc delete mode 100644 changelog.d/4027.bugfix delete mode 100644 changelog.d/4033.bugfix diff --git a/CHANGES.md b/CHANGES.md index 048b9f95db..6c80a86461 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,31 @@ +Synapse 0.33.7rc1 (2018-10-15) +============================== + +Features +-------- + +- Add support for end-to-end key backup (MSC1687) ([\#4019](https://github.com/matrix-org/synapse/issues/4019)) + + +Bugfixes +-------- + +- Fix bug in event persistence logic which caused 'NoneType is not iterable' ([\#3995](https://github.com/matrix-org/synapse/issues/3995)) +- Fix exception in background metrics collection ([\#3996](https://github.com/matrix-org/synapse/issues/3996)) +- Fix exception handling in fetching remote profiles ([\#3997](https://github.com/matrix-org/synapse/issues/3997)) +- Fix handling of rejected threepid invites ([\#3999](https://github.com/matrix-org/synapse/issues/3999)) +- Workers now start on Python 3. ([\#4027](https://github.com/matrix-org/synapse/issues/4027)) +- Synapse now starts on Python 3.7. 
([\#4033](https://github.com/matrix-org/synapse/issues/4033)) + + +Internal Changes +---------------- + +- Log exceptions in looping calls ([\#4008](https://github.com/matrix-org/synapse/issues/4008)) +- Optimisation for serving federation requests ([\#4017](https://github.com/matrix-org/synapse/issues/4017)) +- Add metric to count number of non-empty sync responses ([\#4022](https://github.com/matrix-org/synapse/issues/4022)) + + Synapse 0.33.6 (2018-10-04) =========================== diff --git a/changelog.d/3995.bugfix b/changelog.d/3995.bugfix deleted file mode 100644 index 2adc36756b..0000000000 --- a/changelog.d/3995.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug in event persistence logic which caused 'NoneType is not iterable' \ No newline at end of file diff --git a/changelog.d/3996.bugfix b/changelog.d/3996.bugfix deleted file mode 100644 index a056485ea1..0000000000 --- a/changelog.d/3996.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix exception in background metrics collection diff --git a/changelog.d/3997.bugfix b/changelog.d/3997.bugfix deleted file mode 100644 index b060ee8c18..0000000000 --- a/changelog.d/3997.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix exception handling in fetching remote profiles diff --git a/changelog.d/3999.bugfix b/changelog.d/3999.bugfix deleted file mode 100644 index dc3b2caffa..0000000000 --- a/changelog.d/3999.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix handling of rejected threepid invites diff --git a/changelog.d/4008.misc b/changelog.d/4008.misc deleted file mode 100644 index 5730210054..0000000000 --- a/changelog.d/4008.misc +++ /dev/null @@ -1 +0,0 @@ -Log exceptions in looping calls diff --git a/changelog.d/4017.misc b/changelog.d/4017.misc deleted file mode 100644 index b1ceb06560..0000000000 --- a/changelog.d/4017.misc +++ /dev/null @@ -1 +0,0 @@ -Optimisation for serving federation requests \ No newline at end of file diff --git a/changelog.d/4019.feature b/changelog.d/4019.feature deleted file mode 100644 index 49e066d269..0000000000 --- a/changelog.d/4019.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for end-to-end key backup (MSC1687) diff --git a/changelog.d/4022.misc b/changelog.d/4022.misc deleted file mode 100644 index 5b0e136795..0000000000 --- a/changelog.d/4022.misc +++ /dev/null @@ -1 +0,0 @@ -Add metric to count number of non-empty sync responses diff --git a/changelog.d/4027.bugfix b/changelog.d/4027.bugfix deleted file mode 100644 index c9bbff3f68..0000000000 --- a/changelog.d/4027.bugfix +++ /dev/null @@ -1 +0,0 @@ -Workers now start on Python 3. diff --git a/changelog.d/4033.bugfix b/changelog.d/4033.bugfix deleted file mode 100644 index 05d54d9524..0000000000 --- a/changelog.d/4033.bugfix +++ /dev/null @@ -1 +0,0 @@ -Synapse now starts on Python 3.7. 
\ No newline at end of file -- cgit 1.4.1 From 762a0982aab04ebec1e7a00bc03d26aefa8461c4 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Mon, 15 Oct 2018 14:46:09 -0600 Subject: Python is hard --- synapse/handlers/auth.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 9038fee264..f1befeb575 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -469,11 +469,12 @@ class AuthHandler(BaseHandler): def _get_params_terms(self): return { "policies": { - "privacy_policy": { - "version": self.hs.config.user_consent_version, - "en": { - "name": "Privacy Policy", - "url": "%s/_matrix/consent" % (self.hs.config.public_baseurl,), + "privacy_policy": { + "version": self.hs.config.user_consent_version, + "en": { + "name": "Privacy Policy", + "url": "%s/_matrix/consent" % (self.hs.config.public_baseurl,), + }, }, }, } -- cgit 1.4.1 From 442734ff9e7a4ac09c54a58f8b5467379673914f Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Mon, 15 Oct 2018 14:56:13 -0600 Subject: Ensure the terms params are actually provided --- synapse/handlers/auth.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index f1befeb575..12979f6ed3 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -486,6 +486,7 @@ class AuthHandler(BaseHandler): get_params = { LoginType.RECAPTCHA: self._get_params_recaptcha, + LoginType.TERMS: self._get_params_terms, } params = {} -- cgit 1.4.1 From a8ed93a4b55a19a478c9aba929bfea07e691abbf Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Mon, 15 Oct 2018 16:10:29 -0600 Subject: pep8 --- synapse/handlers/auth.py | 2 +- synapse/rest/client/v2_alpha/auth.py | 3 --- synapse/rest/client/v2_alpha/register.py | 12 ++---------- 3 files changed, 3 insertions(+), 14 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 12979f6ed3..bef796fd0c 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -469,7 +469,7 @@ class AuthHandler(BaseHandler): def _get_params_terms(self): return { "policies": { - "privacy_policy": { + "privacy_policy": { "version": self.hs.config.user_consent_version, "en": { "name": "Privacy Policy", diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index ec583ad16a..0b2933fe8e 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -158,9 +158,6 @@ class AuthRestServlet(RestServlet): defer.returnValue(None) elif stagetype == LoginType.TERMS: session = request.args['session'][0] - authdict = { - 'session': session, - } html = TERMS_TEMPLATE % { 'session': session, diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 851ce6e9a4..c5214330ad 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -359,19 +359,11 @@ class RegisterRestServlet(RestServlet): [LoginType.MSISDN, LoginType.EMAIL_IDENTITY] ]) + # Append m.login.terms to all flows if we're requiring consent if self.hs.config.block_events_without_consent_error is not None: new_flows = [] for flow in flows: - # To only allow registration if completing GDPR auth, - # making clients that don't support it use fallback auth. 
flow.append(LoginType.TERMS) - - # or to duplicate all the flows above with the GDPR flow on the - # end so clients that support it can use it but clients that don't - # continue to consent via the DM from server notices bot. - #new_flows.extend([ - # flow + [LoginType.TERMS] - #]) flows.extend(new_flows) auth_result, params, session_id = yield self.auth_handler.check_auth( @@ -461,7 +453,7 @@ class RegisterRestServlet(RestServlet): ) if auth_result and LoginType.TERMS in auth_result: - logger.info("User %s has consented to the privacy policy" % registered_user_id) + logger.info("%s has consented to the privacy policy" % registered_user_id) yield self.store.user_set_consent_version( registered_user_id, self.hs.config.user_consent_version, ) -- cgit 1.4.1 From b8a5b0097c4fc88a43c5c5f9785720d38299d3d8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 16 Oct 2018 10:44:49 +0100 Subject: Various cleanups in the federation client code (#4031) - Improve logging: log things in the right order, include destination and txids in all log lines, don't log successful responses twice - Fix the docstring on TransportLayerClient.send_transaction - Don't use treq.request, which is overcomplicated for our purposes: just use a twisted.web.client.Agent. - simplify the logic for setting up the bodyProducer - fix bytes/str confusions --- changelog.d/4031.misc | 1 + synapse/federation/transaction_queue.py | 27 +++++------- synapse/federation/transport/client.py | 19 ++++---- synapse/http/matrixfederationclient.py | 78 +++++++++++++++++---------------- 4 files changed, 64 insertions(+), 61 deletions(-) create mode 100644 changelog.d/4031.misc diff --git a/changelog.d/4031.misc b/changelog.d/4031.misc new file mode 100644 index 0000000000..60be8b59fd --- /dev/null +++ b/changelog.d/4031.misc @@ -0,0 +1 @@ +Various cleanups in the federation client code diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 98b5950800..3fdd63be95 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -633,14 +633,6 @@ class TransactionQueue(object): transaction, json_data_cb ) code = 200 - - if response: - for e_id, r in response.get("pdus", {}).items(): - if "error" in r: - logger.warn( - "Transaction returned error for %s: %s", - e_id, r, - ) except HttpResponseException as e: code = e.code response = e.response @@ -657,19 +649,24 @@ class TransactionQueue(object): destination, txn_id, code ) - logger.debug("TX [%s] Sent transaction", destination) - logger.debug("TX [%s] Marking as delivered...", destination) - yield self.transaction_actions.delivered( transaction, code, response ) - logger.debug("TX [%s] Marked as delivered", destination) + logger.debug("TX [%s] {%s} Marked as delivered", destination, txn_id) - if code != 200: + if code == 200: + for e_id, r in response.get("pdus", {}).items(): + if "error" in r: + logger.warn( + "TX [%s] {%s} Remote returned error for %s: %s", + destination, txn_id, e_id, r, + ) + else: for p in pdus: - logger.info( - "Failed to send event %s to %s", p.event_id, destination + logger.warn( + "TX [%s] {%s} Failed to send event %s", + destination, txn_id, p.event_id, ) success = False diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 2ab973d6c8..edba5a9808 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -143,9 +143,17 @@ class 
TransportLayerClient(object): transaction (Transaction) Returns: - Deferred: Results of the deferred is a tuple in the form of - (response_code, response_body) where the response_body is a - python dict decoded from json + Deferred: Succeeds when we get a 2xx HTTP response. The result + will be the decoded JSON body. + + Fails with ``HTTPRequestException`` if we get an HTTP response + code >= 300. + + Fails with ``NotRetryingDestination`` if we are not yet ready + to retry this server. + + Fails with ``FederationDeniedError`` if this destination + is not on our federation whitelist """ logger.debug( "send_data dest=%s, txid=%s", @@ -170,11 +178,6 @@ class TransportLayerClient(object): backoff_on_404=True, # If we get a 404 the other side has gone ) - logger.debug( - "send_data dest=%s, txid=%s, got response: 200", - transaction.destination, transaction.transaction_id, - ) - defer.returnValue(response) @defer.inlineCallbacks diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 14b12cd1c4..fcc02fc77d 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -195,7 +195,7 @@ class MatrixFederationHttpClient(object): ) self.clock = hs.get_clock() self._store = hs.get_datastore() - self.version_string = hs.version_string.encode('ascii') + self.version_string_bytes = hs.version_string.encode('ascii') self.default_timeout = 60 def schedule(x): @@ -261,8 +261,8 @@ class MatrixFederationHttpClient(object): ignore_backoff=ignore_backoff, ) - method = request.method - destination = request.destination + method_bytes = request.method.encode("ascii") + destination_bytes = request.destination.encode("ascii") path_bytes = request.path.encode("ascii") if request.query: query_bytes = encode_query_args(request.query) @@ -270,8 +270,8 @@ class MatrixFederationHttpClient(object): query_bytes = b"" headers_dict = { - "User-Agent": [self.version_string], - "Host": [request.destination], + b"User-Agent": [self.version_string_bytes], + b"Host": [destination_bytes], } with limiter: @@ -282,50 +282,51 @@ class MatrixFederationHttpClient(object): else: retries_left = MAX_SHORT_RETRIES - url = urllib.parse.urlunparse(( - b"matrix", destination.encode("ascii"), + url_bytes = urllib.parse.urlunparse(( + b"matrix", destination_bytes, path_bytes, None, query_bytes, b"", - )).decode('ascii') + )) + url_str = url_bytes.decode('ascii') - http_url = urllib.parse.urlunparse(( + url_to_sign_bytes = urllib.parse.urlunparse(( b"", b"", path_bytes, None, query_bytes, b"", - )).decode('ascii') + )) while True: try: json = request.get_json() if json: - data = encode_canonical_json(json) - headers_dict["Content-Type"] = ["application/json"] + headers_dict[b"Content-Type"] = [b"application/json"] self.sign_request( - destination, method, http_url, headers_dict, json + destination_bytes, method_bytes, url_to_sign_bytes, + headers_dict, json, ) - else: - data = None - self.sign_request(destination, method, http_url, headers_dict) - - logger.info( - "{%s} [%s] Sending request: %s %s", - request.txn_id, destination, method, url - ) - - if data: + data = encode_canonical_json(json) producer = FileBodyProducer( BytesIO(data), - cooperator=self._cooperator + cooperator=self._cooperator, ) else: producer = None + self.sign_request( + destination_bytes, method_bytes, url_to_sign_bytes, + headers_dict, + ) - request_deferred = treq.request( - method, - url, + logger.info( + "{%s} [%s] Sending request: %s %s", + request.txn_id, request.destination, 
request.method, + url_str, + ) + + # we don't want all the fancy cookie and redirect handling that + # treq.request gives: just use the raw Agent. + request_deferred = self.agent.request( + method_bytes, + url_bytes, headers=Headers(headers_dict), - data=producer, - agent=self.agent, - reactor=self.hs.get_reactor(), - unbuffered=True + bodyProducer=producer, ) request_deferred = timeout_deferred( @@ -344,9 +345,9 @@ class MatrixFederationHttpClient(object): logger.warn( "{%s} [%s] Request failed: %s %s: %s", request.txn_id, - destination, - method, - url, + request.destination, + request.method, + url_str, _flatten_response_never_received(e), ) @@ -366,7 +367,7 @@ class MatrixFederationHttpClient(object): logger.debug( "{%s} [%s] Waiting %ss before re-sending...", request.txn_id, - destination, + request.destination, delay, ) @@ -378,7 +379,7 @@ class MatrixFederationHttpClient(object): logger.info( "{%s} [%s] Got response headers: %d %s", request.txn_id, - destination, + request.destination, response.code, response.phrase.decode('ascii', errors='replace'), ) @@ -411,8 +412,9 @@ class MatrixFederationHttpClient(object): destination_is must be non-None. method (bytes): The HTTP method of the request url_bytes (bytes): The URI path of the request - headers_dict (dict): Dictionary of request headers to append to - content (bytes): The body of the request + headers_dict (dict[bytes, list[bytes]]): Dictionary of request headers to + append to + content (object): The body of the request destination_is (bytes): As 'destination', but if the destination is an identity server -- cgit 1.4.1 From a94967bc5f3735375f12bf8275236eb2ea1cc66b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 16 Oct 2018 15:29:08 +0100 Subject: run the circle builds in docker containers Docker containers spin up faster than entire VMs. 
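(For reference, the Agent-based request pattern that the federation client commit above switches to looks roughly like the following. This is a minimal sketch rather than Synapse's code; the function name and header values are illustrative.)

    from io import BytesIO

    from twisted.internet import reactor
    from twisted.web.client import Agent, FileBodyProducer
    from twisted.web.http_headers import Headers

    def send_request(method, url, body=None):
        # A raw Agent gives none of treq's cookie or redirect handling,
        # which is all a federation transaction needs.
        agent = Agent(reactor)
        producer = FileBodyProducer(BytesIO(body)) if body is not None else None
        # Agent.request wants bytes for the method and URL, plus a Headers
        # object mapping bytes to lists of bytes.
        return agent.request(
            method,  # e.g. b"PUT"
            url,     # e.g. b"https://example.com/path"
            headers=Headers({b"User-Agent": [b"Synapse"]}),
            bodyProducer=producer,
        )
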
--- .circleci/config.yml | 89 ++++++++++++++++++++++++++++------------------------ 1 file changed, 48 insertions(+), 41 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index ec3848b048..5395028426 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -23,99 +23,106 @@ jobs: - run: docker push matrixdotorg/synapse:latest - run: docker push matrixdotorg/synapse:latest-py3 sytestpy2: - machine: true + docker: + - image: matrixdotorg/sytest-synapsepy2 + working_directory: /src steps: - checkout - - run: docker pull matrixdotorg/sytest-synapsepy2 - - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy2 + - run: /synapse_sytest.sh - store_artifacts: - path: ~/project/logs + path: /logs destination: logs - store_test_results: - path: logs + path: /logs sytestpy2postgres: - machine: true + docker: + - image: matrixdotorg/sytest-synapsepy2 + working_directory: /src steps: - checkout - - run: docker pull matrixdotorg/sytest-synapsepy2 - - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy2 + - run: POSTGRES=1 /synapse_sytest.sh - store_artifacts: - path: ~/project/logs + path: /logs destination: logs - store_test_results: - path: logs + path: /logs sytestpy2merged: - machine: true + docker: + - image: matrixdotorg/sytest-synapsepy2 + working_directory: /src steps: - checkout - run: bash .circleci/merge_base_branch.sh - - run: docker pull matrixdotorg/sytest-synapsepy2 - - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy2 + - run: /synapse_sytest.sh - store_artifacts: - path: ~/project/logs + path: /logs destination: logs - store_test_results: - path: logs - + path: /logs sytestpy2postgresmerged: - machine: true + docker: + - image: matrixdotorg/sytest-synapsepy2 + working_directory: /src steps: - checkout - run: bash .circleci/merge_base_branch.sh - - run: docker pull matrixdotorg/sytest-synapsepy2 - - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy2 + - run: POSTGRES=1 /synapse_sytest.sh - store_artifacts: - path: ~/project/logs + path: /logs destination: logs - store_test_results: - path: logs + path: /logs sytestpy3: - machine: true + docker: + - image: matrixdotorg/sytest-synapsepy3 + working_directory: /src steps: - checkout - - run: docker pull matrixdotorg/sytest-synapsepy3 - - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3 + - run: /synapse_sytest.sh - store_artifacts: - path: ~/project/logs + path: /logs destination: logs - store_test_results: - path: logs + path: /logs sytestpy3postgres: - machine: true + docker: + - image: matrixdotorg/sytest-synapsepy3 + working_directory: /src steps: - checkout - - run: docker pull matrixdotorg/sytest-synapsepy3 - - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy3 + - run: POSTGRES=1 /synapse_sytest.sh - store_artifacts: - path: ~/project/logs + path: /logs destination: logs - store_test_results: - path: logs + path: /logs sytestpy3merged: - machine: true + docker: + - image: matrixdotorg/sytest-synapsepy3 + working_directory: /src steps: - checkout - run: bash .circleci/merge_base_branch.sh - - run: docker pull matrixdotorg/sytest-synapsepy3 - - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3 + - run: /synapse_sytest.sh - store_artifacts: - path: ~/project/logs + path: /logs 
        destination: logs
    - store_test_results:
-        path: logs
+        path: /logs
  sytestpy3postgresmerged:
-    machine: true
+    docker:
+      - image: matrixdotorg/sytest-synapsepy3
+    working_directory: /src
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
-      - run: docker pull matrixdotorg/sytest-synapsepy3
-      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy3
+      - run: POSTGRES=1 /synapse_sytest.sh
      - store_artifacts:
-          path: ~/project/logs
+          path: /logs
          destination: logs
      - store_test_results:
-          path: logs
+          path: /logs
workflows:
  version: 2
-- cgit 1.4.1
From e238013c44714174f1bf9a2b9b1f576728f40784 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 3 Sep 2018 15:13:17 +0100
Subject: Add v2 state res algorithm. We hook this up to the vdh test room version.
---
synapse/state/__init__.py | 87 ++++++--
synapse/state/v2.py | 544 ++++++++++++++++++++++++++++++++++++++++++++++
synapse/storage/events.py | 9 +-
3 files changed, 616 insertions(+), 24 deletions(-)
create mode 100644 synapse/state/v2.py
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index b22495c1f9..836e137296 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -19,13 +19,14 @@ from collections import namedtuple
from six import iteritems, itervalues
+import attr
from frozendict import frozendict
from twisted.internet import defer
from synapse.api.constants import EventTypes, RoomVersions
from synapse.events.snapshot import EventContext
-from synapse.state import v1
+from synapse.state import v1, v2
from synapse.util.async_helpers import Linearizer
from synapse.util.caches import get_cache_factor_for
from synapse.util.caches.expiringcache import ExpiringCache
@@ -372,15 +373,10 @@ class StateHandler(object):
        result = yield self._state_resolution_handler.resolve_state_groups(
            room_id, room_version, state_groups_ids, None,
-            self._state_map_factory,
+            state_res_store=StateResolutionStore(self.store),
        )
        defer.returnValue(result)
-    def _state_map_factory(self, ev_ids):
-        return self.store.get_events(
-            ev_ids, get_prev_content=False, check_redacted=False,
-        )
-
    @defer.inlineCallbacks
    def resolve_events(self, room_version, state_sets, event):
        logger.info(
@@ -401,7 +397,7 @@ class StateHandler(object):
        new_state = yield resolve_events_with_factory(
            room_version, state_set_ids,
            event_map=state_map,
-            state_map_factory=self._state_map_factory
+            state_res_store=StateResolutionStore(self.store),
        )
        new_state = {
@@ -436,7 +432,7 @@ class StateResolutionHandler(object):
    @defer.inlineCallbacks
    @log_function
    def resolve_state_groups(
-        self, room_id, room_version, state_groups_ids, event_map, state_map_factory,
+        self, room_id, room_version, state_groups_ids, event_map, state_res_store,
    ):
        """Resolves conflicts between a set of state groups
@@ -454,9 +450,11 @@ class StateResolutionHandler(object):
                a dict from event_id to event, for any events that we happen to
                have in flight (eg, those currently being persisted). This will be
                used as a starting point for finding the state we need; any missing
-                events will be requested via state_map_factory.
+                events will be requested via state_res_store.
+
+                If None, all events will be fetched via state_res_store.
+ state_res_store (StateResolutionStore) Returns: Deferred[_StateCacheEntry]: resolved state @@ -502,7 +500,7 @@ class StateResolutionHandler(object): room_version, list(itervalues(state_groups_ids)), event_map=event_map, - state_map_factory=state_map_factory, + state_res_store=state_res_store, ) # if the new state matches any of the input state groups, we can @@ -583,7 +581,7 @@ def _make_state_cache_entry( ) -def resolve_events_with_factory(room_version, state_sets, event_map, state_map_factory): +def resolve_events_with_factory(room_version, state_sets, event_map, state_res_store): """ Args: room_version(str): Version of the room @@ -599,17 +597,19 @@ def resolve_events_with_factory(room_version, state_sets, event_map, state_map_f If None, all events will be fetched via state_map_factory. - state_map_factory(func): will be called - with a list of event_ids that are needed, and should return with - a Deferred of dict of event_id to event. + state_res_store (StateResolutionStore) Returns Deferred[dict[(str, str), str]]: a map from (type, state_key) to event_id. """ - if room_version in (RoomVersions.V1, RoomVersions.VDH_TEST,): + if room_version == RoomVersions.V1: return v1.resolve_events_with_factory( - state_sets, event_map, state_map_factory, + state_sets, event_map, state_res_store.get_events, + ) + elif room_version == RoomVersions.VDH_TEST: + return v2.resolve_events_with_factory( + state_sets, event_map, state_res_store, ) else: # This should only happen if we added a version but forgot to add it to @@ -617,3 +617,54 @@ def resolve_events_with_factory(room_version, state_sets, event_map, state_map_f raise Exception( "No state resolution algorithm defined for version %r" % (room_version,) ) + + +@attr.s +class StateResolutionStore(object): + """Interface that allows state resolution algorithms to access the database + in well defined way. + + Args: + store (DataStore) + """ + + store = attr.ib() + + def get_events(self, event_ids, allow_rejected=False): + """Get events from the database + + Args: + event_ids (list): The event_ids of the events to fetch + allow_rejected (bool): If True return rejected events. + + Returns: + Deferred[dict[str, FrozenEvent]]: Dict from event_id to event. + """ + + return self.store.get_events( + event_ids, + check_redacted=False, + get_prev_content=False, + allow_rejected=allow_rejected, + ) + + def get_auth_chain(self, event_ids): + """Gets the full auth chain for a set of events (including rejected + events). + + Includes the given event IDs in the result. + + Note that: + 1. All events must be state events. + 2. For v1 rooms this may not have the full auth chain in the + presence of rejected events + + Args: + event_ids (list): The event IDs of the events to fetch the auth + chain for. Must be state events. + + Returns: + Deferred[list[str]]: List of event IDs of the auth chain. + """ + + return self.store.get_auth_chain_ids(event_ids, include_given=True) diff --git a/synapse/state/v2.py b/synapse/state/v2.py new file mode 100644 index 0000000000..d034b4f38e --- /dev/null +++ b/synapse/state/v2.py @@ -0,0 +1,544 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import heapq
+import itertools
+import logging
+
+from six import iteritems, itervalues
+
+from twisted.internet import defer
+
+from synapse import event_auth
+from synapse.api.constants import EventTypes
+from synapse.api.errors import AuthError
+
+logger = logging.getLogger(__name__)
+
+
+@defer.inlineCallbacks
+def resolve_events_with_factory(state_sets, event_map, state_res_store):
+    """Resolves the state using the v2 state resolution algorithm
+
+    Args:
+        state_sets(list): List of dicts of (type, state_key) -> event_id,
+            which are the different state groups to resolve.
+
+        event_map(dict[str,FrozenEvent]|None):
+            a dict from event_id to event, for any events that we happen to
+            have in flight (eg, those currently being persisted). This will be
+            used as a starting point for finding the state we need; any missing
+            events will be requested via state_res_store.
+
+            If None, all events will be fetched via state_res_store.
+
+        state_res_store (StateResolutionStore)
+
+    Returns:
+        Deferred[dict[(str, str), str]]:
+            a map from (type, state_key) to event_id.
+    """
+
+    logger.debug("Computing conflicted state")
+
+    # First split up the un/conflicted state
+    unconflicted_state, conflicted_state = _seperate(state_sets)
+
+    if not conflicted_state:
+        defer.returnValue(unconflicted_state)
+
+    logger.debug("%d conflicted state entries", len(conflicted_state))
+    logger.debug("Calculating auth chain difference")
+
+    # Also fetch all auth events that appear in only some of the state sets'
+    # auth chains.
+    auth_diff = yield _get_auth_chain_difference(
+        state_sets, event_map, state_res_store,
+    )
+
+    full_conflicted_set = set(itertools.chain(
+        itertools.chain.from_iterable(itervalues(conflicted_state)),
+        auth_diff,
+    ))
+
+    events = yield state_res_store.get_events([
+        eid for eid in full_conflicted_set
+        if eid not in event_map
+    ], allow_rejected=True)
+    event_map.update(events)
+
+    full_conflicted_set = set(eid for eid in full_conflicted_set if eid in event_map)
+
+    logger.debug("%d full_conflicted_set entries", len(full_conflicted_set))
+
+    # Get and sort all the power events (kicks/bans/etc)
+    power_events = (
+        eid for eid in full_conflicted_set
+        if _is_power_event(event_map[eid])
+    )
+
+    sorted_power_events = yield _reverse_topological_power_sort(
+        power_events,
+        event_map,
+        state_res_store,
+        full_conflicted_set,
+    )
+
+    logger.debug("sorted %d power events", len(sorted_power_events))
+
+    # Now sequentially auth each one
+    resolved_state = yield _iterative_auth_checks(
+        sorted_power_events, unconflicted_state, event_map,
+        state_res_store,
+    )
+
+    logger.debug("resolved power events")
+
+    # OK, so we've now resolved the power events. Now sort the remaining
+    # events using the mainline of the resolved power level.
+ + leftover_events = [ + ev_id + for ev_id in full_conflicted_set + if ev_id not in sorted_power_events + ] + + logger.debug("sorting %d remaining events", len(leftover_events)) + + pl = resolved_state.get((EventTypes.PowerLevels, ""), None) + leftover_events = yield _mainline_sort( + leftover_events, pl, event_map, state_res_store, + ) + + logger.debug("resolving remaining events") + + resolved_state = yield _iterative_auth_checks( + leftover_events, resolved_state, event_map, + state_res_store, + ) + + logger.debug("resolved") + + # We make sure that unconflicted state always still applies. + resolved_state.update(unconflicted_state) + + logger.debug("done") + + defer.returnValue(resolved_state) + + +@defer.inlineCallbacks +def _get_power_level_for_sender(event_id, event_map, state_res_store): + """Return the power level of the sender of the given event according to + their auth events. + + Args: + event_id (str) + event_map (dict[str,FrozenEvent]) + state_res_store (StateResolutionStore) + + Returns: + Deferred[int] + """ + event = yield _get_event(event_id, event_map, state_res_store) + + pl = None + for aid, _ in event.auth_events: + aev = yield _get_event(aid, event_map, state_res_store) + if (aev.type, aev.state_key) == (EventTypes.PowerLevels, ""): + pl = aev + break + + if pl is None: + # Couldn't find power level. Check if they're the creator of the room + for aid, _ in event.auth_events: + aev = yield _get_event(aid, event_map, state_res_store) + if (aev.type, aev.state_key) == (EventTypes.Create, ""): + if aev.content.get("creator") == event.sender: + defer.returnValue(100) + break + defer.returnValue(0) + + level = pl.content.get("users", {}).get(event.sender) + if level is None: + level = pl.content.get("users_default", 0) + + if level is None: + defer.returnValue(0) + else: + defer.returnValue(int(level)) + + +@defer.inlineCallbacks +def _get_auth_chain_difference(state_sets, event_map, state_res_store): + """Compare the auth chains of each state set and return the set of events + that only appear in some but not all of the auth chains. + + Args: + state_sets (list) + event_map (dict[str,FrozenEvent]) + state_res_store (StateResolutionStore) + + Returns: + Deferred[set[str]]: Set of event IDs + """ + common = set(itervalues(state_sets[0])).intersection( + *(itervalues(s) for s in state_sets[1:]) + ) + + auth_sets = [] + for state_set in state_sets: + auth_ids = set( + eid + for key, eid in iteritems(state_set) + if (key[0] in ( + EventTypes.Member, + EventTypes.ThirdPartyInvite, + ) or key in ( + (EventTypes.PowerLevels, ''), + (EventTypes.Create, ''), + (EventTypes.JoinRules, ''), + )) and eid not in common + ) + + auth_chain = yield state_res_store.get_auth_chain(auth_ids) + auth_ids.update(auth_chain) + + auth_sets.append(auth_ids) + + intersection = set(auth_sets[0]).intersection(*auth_sets[1:]) + union = set().union(*auth_sets) + + defer.returnValue(union - intersection) + + +def _seperate(state_sets): + """Return the unconflicted and conflicted state. This is different than in + the original algorithm, as this defines a key to be conflicted if one of + the state sets doesn't have that key. + + Args: + state_sets (list) + + Returns: + tuple[dict, dict]: A tuple of unconflicted and conflicted state. 
The
+        conflicted state dict is a map from type/state_key to set of event IDs
+    """
+    unconflicted_state = {}
+    conflicted_state = {}
+
+    for key in set(itertools.chain.from_iterable(state_sets)):
+        event_ids = set(state_set.get(key) for state_set in state_sets)
+        if len(event_ids) == 1:
+            unconflicted_state[key] = event_ids.pop()
+        else:
+            event_ids.discard(None)
+            conflicted_state[key] = event_ids
+
+    return unconflicted_state, conflicted_state
+
+
+def _is_power_event(event):
+    """Return whether or not the event is a "power event", as defined by the
+    v2 state resolution algorithm
+
+    Args:
+        event (FrozenEvent)
+
+    Returns:
+        boolean
+    """
+    if (event.type, event.state_key) in (
+        (EventTypes.PowerLevels, ""),
+        (EventTypes.JoinRules, ""),
+        (EventTypes.Create, ""),
+    ):
+        return True
+
+    if event.type == EventTypes.Member:
+        if event.membership in ('leave', 'ban'):
+            return event.sender != event.state_key
+
+    return False
+
+
+@defer.inlineCallbacks
+def _add_event_and_auth_chain_to_graph(graph, event_id, event_map,
+                                       state_res_store, auth_diff):
+    """Helper function for _reverse_topological_power_sort that adds the event
+    and its auth chain (that is in the auth diff) to the graph
+
+    Args:
+        graph (dict[str, set[str]]): A map from event ID to the event's auth
+            event IDs
+        event_id (str): Event to add to the graph
+        event_map (dict[str,FrozenEvent])
+        state_res_store (StateResolutionStore)
+        auth_diff (set[str]): Set of event IDs that are in the auth difference.
+    """
+
+    state = [event_id]
+    while state:
+        eid = state.pop()
+        graph.setdefault(eid, set())
+
+        event = yield _get_event(eid, event_map, state_res_store)
+        for aid, _ in event.auth_events:
+            if aid in auth_diff:
+                if aid not in graph:
+                    state.append(aid)
+
+                graph.setdefault(eid, set()).add(aid)
+
+
+@defer.inlineCallbacks
+def _reverse_topological_power_sort(event_ids, event_map, state_res_store, auth_diff):
+    """Returns a list of the event_ids sorted by reverse topological ordering,
+    and then by power level and origin_server_ts
+
+    Args:
+        event_ids (list[str]): The events to sort
+        event_map (dict[str,FrozenEvent])
+        state_res_store (StateResolutionStore)
+        auth_diff (set[str]): Set of event IDs that are in the auth difference.
+
+    Returns:
+        Deferred[list[str]]: The sorted list
+    """
+
+    graph = {}
+    for event_id in event_ids:
+        yield _add_event_and_auth_chain_to_graph(
+            graph, event_id, event_map, state_res_store, auth_diff,
+        )
+
+    event_to_pl = {}
+    for event_id in graph:
+        pl = yield _get_power_level_for_sender(event_id, event_map, state_res_store)
+        event_to_pl[event_id] = pl
+
+    def _get_power_order(event_id):
+        ev = event_map[event_id]
+        pl = event_to_pl[event_id]
+
+        return -pl, ev.origin_server_ts, event_id
+
+    # Note: graph is modified during the sort
+    it = lexicographical_topological_sort(
+        graph,
+        key=_get_power_order,
+    )
+    sorted_events = list(it)
+
+    defer.returnValue(sorted_events)
+
+
+@defer.inlineCallbacks
+def _iterative_auth_checks(event_ids, base_state, event_map, state_res_store):
+    """Sequentially apply auth checks to each event in the given list, updating the
+    state as it goes along.
+ + Args: + event_ids (list[str]): Ordered list of events to apply auth checks to + base_state (dict[tuple[str, str], str]): The set of state to start with + event_map (dict[str,FrozenEvent]) + state_res_store (StateResolutionStore) + + Returns: + Deferred[dict[tuple[str, str], str]]: Returns the final updated state + """ + resolved_state = base_state.copy() + + for event_id in event_ids: + event = event_map[event_id] + + auth_events = {} + for aid, _ in event.auth_events: + ev = yield _get_event(aid, event_map, state_res_store) + + if ev.rejected_reason is None: + auth_events[(ev.type, ev.state_key)] = ev + + for key in event_auth.auth_types_for_event(event): + if key in resolved_state: + ev_id = resolved_state[key] + ev = yield _get_event(ev_id, event_map, state_res_store) + + if ev.rejected_reason is None: + auth_events[key] = event_map[ev_id] + + try: + event_auth.check( + event, auth_events, + do_sig_check=False, + do_size_check=False + ) + + resolved_state[(event.type, event.state_key)] = event_id + except AuthError: + pass + + defer.returnValue(resolved_state) + + +@defer.inlineCallbacks +def _mainline_sort(event_ids, resolved_power_event_id, event_map, + state_res_store): + """Returns a sorted list of event_ids sorted by mainline ordering based on + the given event resolved_power_event_id + + Args: + event_ids (list[str]): Events to sort + resolved_power_event_id (str): The final resolved power level event ID + event_map (dict[str,FrozenEvent]) + state_res_store (StateResolutionStore) + + Returns: + Deferred[list[str]]: The sorted list + """ + mainline = [] + pl = resolved_power_event_id + while pl: + mainline.append(pl) + pl_ev = yield _get_event(pl, event_map, state_res_store) + auth_events = pl_ev.auth_events + pl = None + for aid, _ in auth_events: + ev = yield _get_event(aid, event_map, state_res_store) + if (ev.type, ev.state_key) == (EventTypes.PowerLevels, ""): + pl = aid + break + + mainline_map = {ev_id: i + 1 for i, ev_id in enumerate(reversed(mainline))} + + event_ids = list(event_ids) + + order_map = {} + for ev_id in event_ids: + depth = yield _get_mainline_depth_for_event( + event_map[ev_id], mainline_map, + event_map, state_res_store, + ) + order_map[ev_id] = (depth, event_map[ev_id].origin_server_ts, ev_id) + + event_ids.sort(key=lambda ev_id: order_map[ev_id]) + + defer.returnValue(event_ids) + + +@defer.inlineCallbacks +def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_store): + """Get the mainline depths for the given event based on the mainline map + + Args: + event (FrozenEvent) + mainline_map (dict[str, int]): Map from event_id to mainline depth for + events in the mainline. 
+        event_map (dict[str,FrozenEvent])
+        state_res_store (StateResolutionStore)
+
+    Returns:
+        Deferred[int]
+    """
+
+    # We do an iterative search, replacing `event` with the power level in its
+    # auth events (if any)
+    while event:
+        depth = mainline_map.get(event.event_id)
+        if depth is not None:
+            defer.returnValue(depth)
+
+        auth_events = event.auth_events
+        event = None
+
+        for aid, _ in auth_events:
+            aev = yield _get_event(aid, event_map, state_res_store)
+            if (aev.type, aev.state_key) == (EventTypes.PowerLevels, ""):
+                event = aev
+                break
+
+    # Didn't find a power level auth event, so we just return 0
+    defer.returnValue(0)
+
+
+@defer.inlineCallbacks
+def _get_event(event_id, event_map, state_res_store):
+    """Helper function to look up event in event_map, falling back to looking
+    it up in the store
+
+    Args:
+        event_id (str)
+        event_map (dict[str,FrozenEvent])
+        state_res_store (StateResolutionStore)
+
+    Returns:
+        Deferred[FrozenEvent]
+    """
+    if event_id not in event_map:
+        events = yield state_res_store.get_events([event_id], allow_rejected=True)
+        event_map.update(events)
+    defer.returnValue(event_map[event_id])
+
+
+def lexicographical_topological_sort(graph, key):
+    """Performs a lexicographic reverse topological sort on the graph.
+
+    This returns a reverse topological sort (i.e. if node A references B then B
+    appears before A in the sort), with ties broken lexicographically based on
+    the return value of the `key` function.
+
+    NOTE: `graph` is modified during the sort.
+
+    Args:
+        graph (dict[str, set[str]]): A representation of the graph where each
+            node is a key in the dict and its value is the set of the node's
+            edges.
+        key (func): A function that takes a node and returns a value that is
+            comparable and used to order nodes
+
+    Yields:
+        str: The next node in the topological sort
+    """
+
+    # Note, this is basically Kahn's algorithm except we look at nodes with no
+    # outgoing edges, cf.
+    # https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm
+    outdegree_map = graph
+    reverse_graph = {}
+
+    # List of nodes with zero out degree. Each entry is actually a tuple of
+    # `(key(node), node)` so that sorting does the right thing
+    zero_outdegree = []
+
+    for node, edges in iteritems(graph):
+        if len(edges) == 0:
+            zero_outdegree.append((key(node), node))
+
+        reverse_graph.setdefault(node, set())
+        for edge in edges:
+            reverse_graph.setdefault(edge, set()).add(node)
+
+    # heapq is a built-in implementation of a priority queue (a min-heap).
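+    # For example (a rough sketch of heapq's behaviour, not part of the
+    # algorithm itself):
+    #     h = [(2, "b"), (1, "a")]
+    #     heapq.heapify(h)    # h[0] is now the smallest entry, (1, "a")
+    #     heapq.heappop(h)    # returns (1, "a"), leaving [(2, "b")]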
+    heapq.heapify(zero_outdegree)
+
+    while zero_outdegree:
+        _, node = heapq.heappop(zero_outdegree)
+
+        for parent in reverse_graph[node]:
+            out = outdegree_map[parent]
+            out.discard(node)
+            if len(out) == 0:
+                heapq.heappush(zero_outdegree, (key(parent), parent))
+
+        yield node
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 03cedf3a75..fc88edcb39 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -34,6 +34,7 @@ from synapse.api.errors import SynapseError
 from synapse.events import EventBase  # noqa: F401
 from synapse.events.snapshot import EventContext  # noqa: F401
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.state import StateResolutionStore
 from synapse.storage.background_updates import BackgroundUpdateStore
 from synapse.storage.event_federation import EventFederationStore
 from synapse.storage.events_worker import EventsWorkerStore
@@ -731,11 +732,6 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
 
         # Ok, we need to defer to the state handler to resolve our state sets.
 
-        def get_events(ev_ids):
-            return self.get_events(
-                ev_ids, get_prev_content=False, check_redacted=False,
-            )
-
         state_groups = {
             sg: state_groups_map[sg] for sg in new_state_groups
         }
@@ -745,7 +741,8 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
 
         logger.debug("calling resolve_state_groups from preserve_events")
         res = yield self._state_resolution_handler.resolve_state_groups(
-            room_id, room_version, state_groups, events_map, get_events
+            room_id, room_version, state_groups, events_map,
+            state_res_store=StateResolutionStore(self)
        )
 
         defer.returnValue((res.state, None))
--
cgit 1.4.1


From 6bd856caa29a022a537ccd415bae8c286baa6881 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 16 Oct 2018 16:15:51 +0100
Subject: Use event.sender rather than the alias event.user_id

---
 synapse/event_auth.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index af3eee95b9..d4d4474847 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -690,7 +690,7 @@ def auth_types_for_event(event):
     auth_types = []
     auth_types.append((EventTypes.PowerLevels, "", ))
-    auth_types.append((EventTypes.Member, event.user_id, ))
+    auth_types.append((EventTypes.Member, event.sender, ))
     auth_types.append((EventTypes.Create, "", ))
 
     if event.type == EventTypes.Member:
--
cgit 1.4.1


From 947c7443eb45f93c7bb25bef7bb263e8062530ea Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 3 Sep 2018 15:14:14 +0100
Subject: Add some state res v2 tests

---
 tests/state/__init__.py |   0
 tests/state/test_v2.py  | 666 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 666 insertions(+)
 create mode 100644 tests/state/__init__.py
 create mode 100644 tests/state/test_v2.py

diff --git a/tests/state/__init__.py b/tests/state/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py
new file mode 100644
index 0000000000..b96bb6b25c
--- /dev/null
+++ b/tests/state/test_v2.py
@@ -0,0 +1,666 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import itertools + +from six.moves import zip + +import attr + +from synapse.api.constants import EventTypes, JoinRules, Membership +from synapse.event_auth import auth_types_for_event +from synapse.events import FrozenEvent +from synapse.state.v2 import ( + lexicographical_topological_sort, + resolve_events_with_factory, +) +from synapse.types import EventID + +from tests import unittest + +ALICE = "@alice:example.com" +BOB = "@bob:example.com" +CHARLIE = "@charlie:example.com" +EVELYN = "@evelyn:example.com" +ZARA = "@zara:example.com" + +ROOM_ID = "!test:example.com" + +MEMBERSHIP_CONTENT_JOIN = {"membership": Membership.JOIN} +MEMBERSHIP_CONTENT_BAN = {"membership": Membership.BAN} + + +ORIGIN_SERVER_TS = 0 + + +class FakeEvent(object): + """A fake event we use as a convenience. + + NOTE: Again as a convenience we use "node_ids" rather than event_ids to + refer to events. The event_id has node_id as localpart and example.com + as domain. + """ + def __init__(self, id, sender, type, state_key, content): + self.node_id = id + self.event_id = EventID(id, "example.com").to_string() + self.sender = sender + self.type = type + self.state_key = state_key + self.content = content + + def to_event(self, auth_events, prev_events): + """Given the auth_events and prev_events, convert to a Frozen Event + + Args: + auth_events (list[str]): list of event_ids + prev_events (list[str]): list of event_ids + + Returns: + FrozenEvent + """ + global ORIGIN_SERVER_TS + + ts = ORIGIN_SERVER_TS + ORIGIN_SERVER_TS = ORIGIN_SERVER_TS + 1 + + event_dict = { + "auth_events": [(a, {}) for a in auth_events], + "prev_events": [(p, {}) for p in prev_events], + "event_id": self.node_id, + "sender": self.sender, + "type": self.type, + "content": self.content, + "origin_server_ts": ts, + "room_id": ROOM_ID, + } + + if self.state_key is not None: + event_dict["state_key"] = self.state_key + + return FrozenEvent(event_dict) + + +# All graphs start with this set of events +INITIAL_EVENTS = [ + FakeEvent( + id="CREATE", + sender=ALICE, + type=EventTypes.Create, + state_key="", + content={"creator": ALICE}, + ), + FakeEvent( + id="IMA", + sender=ALICE, + type=EventTypes.Member, + state_key=ALICE, + content=MEMBERSHIP_CONTENT_JOIN, + ), + FakeEvent( + id="IPOWER", + sender=ALICE, + type=EventTypes.PowerLevels, + state_key="", + content={"users": {ALICE: 100}}, + ), + FakeEvent( + id="IJR", + sender=ALICE, + type=EventTypes.JoinRules, + state_key="", + content={"join_rule": JoinRules.PUBLIC}, + ), + FakeEvent( + id="IMB", + sender=BOB, + type=EventTypes.Member, + state_key=BOB, + content=MEMBERSHIP_CONTENT_JOIN, + ), + FakeEvent( + id="IMC", + sender=CHARLIE, + type=EventTypes.Member, + state_key=CHARLIE, + content=MEMBERSHIP_CONTENT_JOIN, + ), + FakeEvent( + id="IMZ", + sender=ZARA, + type=EventTypes.Member, + state_key=ZARA, + content=MEMBERSHIP_CONTENT_JOIN, + ), + FakeEvent( + id="START", + sender=ZARA, + type=EventTypes.Message, + state_key=None, + content={}, + ), + FakeEvent( + id="END", + sender=ZARA, + type=EventTypes.Message, + state_key=None, + content={}, + ), +] + +INITIAL_EDGES = [ + "START", "IMZ", 
"IMC", "IMB", "IJR", "IPOWER", "IMA", "CREATE", +] + + +class StateTestCase(unittest.TestCase): + def test_ban_vs_pl(self): + events = [ + FakeEvent( + id="PA", + sender=ALICE, + type=EventTypes.PowerLevels, + state_key="", + content={ + "users": { + ALICE: 100, + BOB: 50, + } + }, + ), + FakeEvent( + id="MA", + sender=ALICE, + type=EventTypes.Member, + state_key=ALICE, + content={"membership": Membership.JOIN}, + ), + FakeEvent( + id="MB", + sender=ALICE, + type=EventTypes.Member, + state_key=BOB, + content={"membership": Membership.BAN}, + ), + FakeEvent( + id="PB", + sender=BOB, + type=EventTypes.PowerLevels, + state_key='', + content={ + "users": { + ALICE: 100, + BOB: 50, + }, + }, + ), + ] + + edges = [ + ["END", "MB", "MA", "PA", "START"], + ["END", "PB", "PA"], + ] + + expected_state_ids = ["PA", "MA", "MB"] + + self.do_check(events, edges, expected_state_ids) + + def test_join_rule_evasion(self): + events = [ + FakeEvent( + id="JR", + sender=ALICE, + type=EventTypes.JoinRules, + state_key="", + content={"join_rules": JoinRules.PRIVATE}, + ), + FakeEvent( + id="ME", + sender=EVELYN, + type=EventTypes.Member, + state_key=EVELYN, + content={"membership": Membership.JOIN}, + ), + ] + + edges = [ + ["END", "JR", "START"], + ["END", "ME", "START"], + ] + + expected_state_ids = ["JR"] + + self.do_check(events, edges, expected_state_ids) + + def test_offtopic_pl(self): + events = [ + FakeEvent( + id="PA", + sender=ALICE, + type=EventTypes.PowerLevels, + state_key="", + content={ + "users": { + ALICE: 100, + BOB: 50, + } + }, + ), + FakeEvent( + id="PB", + sender=BOB, + type=EventTypes.PowerLevels, + state_key='', + content={ + "users": { + ALICE: 100, + BOB: 50, + CHARLIE: 50, + }, + }, + ), + FakeEvent( + id="PC", + sender=CHARLIE, + type=EventTypes.PowerLevels, + state_key='', + content={ + "users": { + ALICE: 100, + BOB: 50, + CHARLIE: 0, + }, + }, + ), + ] + + edges = [ + ["END", "PC", "PB", "PA", "START"], + ["END", "PA"], + ] + + expected_state_ids = ["PC"] + + self.do_check(events, edges, expected_state_ids) + + def test_topic_basic(self): + events = [ + FakeEvent( + id="T1", + sender=ALICE, + type=EventTypes.Topic, + state_key="", + content={}, + ), + FakeEvent( + id="PA1", + sender=ALICE, + type=EventTypes.PowerLevels, + state_key='', + content={ + "users": { + ALICE: 100, + BOB: 50, + }, + }, + ), + FakeEvent( + id="T2", + sender=ALICE, + type=EventTypes.Topic, + state_key="", + content={}, + ), + FakeEvent( + id="PA2", + sender=ALICE, + type=EventTypes.PowerLevels, + state_key='', + content={ + "users": { + ALICE: 100, + BOB: 0, + }, + }, + ), + FakeEvent( + id="PB", + sender=BOB, + type=EventTypes.PowerLevels, + state_key='', + content={ + "users": { + ALICE: 100, + BOB: 50, + }, + }, + ), + FakeEvent( + id="T3", + sender=BOB, + type=EventTypes.Topic, + state_key="", + content={}, + ), + ] + + edges = [ + ["END", "PA2", "T2", "PA1", "T1", "START"], + ["END", "T3", "PB", "PA1"], + ] + + expected_state_ids = ["PA2", "T2"] + + self.do_check(events, edges, expected_state_ids) + + def test_topic_reset(self): + events = [ + FakeEvent( + id="T1", + sender=ALICE, + type=EventTypes.Topic, + state_key="", + content={}, + ), + FakeEvent( + id="PA", + sender=ALICE, + type=EventTypes.PowerLevels, + state_key='', + content={ + "users": { + ALICE: 100, + BOB: 50, + }, + }, + ), + FakeEvent( + id="T2", + sender=BOB, + type=EventTypes.Topic, + state_key="", + content={}, + ), + FakeEvent( + id="MB", + sender=ALICE, + type=EventTypes.Member, + state_key=BOB, + content={"membership": 
Membership.BAN}, + ), + ] + + edges = [ + ["END", "MB", "T2", "PA", "T1", "START"], + ["END", "T1"], + ] + + expected_state_ids = ["T1", "MB", "PA"] + + self.do_check(events, edges, expected_state_ids) + + def test_topic(self): + events = [ + FakeEvent( + id="T1", + sender=ALICE, + type=EventTypes.Topic, + state_key="", + content={}, + ), + FakeEvent( + id="PA1", + sender=ALICE, + type=EventTypes.PowerLevels, + state_key='', + content={ + "users": { + ALICE: 100, + BOB: 50, + }, + }, + ), + FakeEvent( + id="T2", + sender=ALICE, + type=EventTypes.Topic, + state_key="", + content={}, + ), + FakeEvent( + id="PA2", + sender=ALICE, + type=EventTypes.PowerLevels, + state_key='', + content={ + "users": { + ALICE: 100, + BOB: 0, + }, + }, + ), + FakeEvent( + id="PB", + sender=BOB, + type=EventTypes.PowerLevels, + state_key='', + content={ + "users": { + ALICE: 100, + BOB: 50, + }, + }, + ), + FakeEvent( + id="T3", + sender=BOB, + type=EventTypes.Topic, + state_key="", + content={}, + ), + FakeEvent( + id="MZ1", + sender=ZARA, + type=EventTypes.Message, + state_key=None, + content={}, + ), + FakeEvent( + id="T4", + sender=ALICE, + type=EventTypes.Topic, + state_key="", + content={}, + ), + ] + + edges = [ + ["END", "T4", "MZ1", "PA2", "T2", "PA1", "T1", "START"], + ["END", "MZ1", "T3", "PB", "PA1"], + ] + + expected_state_ids = ["T4", "PA2"] + + self.do_check(events, edges, expected_state_ids) + + def do_check(self, events, edges, expected_state_ids): + """Take a list of events and edges and calculate the state of the + graph at END, and asserts it matches `expected_state_ids` + + Args: + events (list[FakeEvent]) + edges (list[list[str]]): A list of chains of event edges, e.g. + `[[A, B, C]]` are edges A->B and B->C. + expected_state_ids (list[str]): The expected state at END, (excluding + the keys that haven't changed since START). + """ + # We want to sort the events into topological order for processing. 
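+        # As a sketch of the shape being built here: for a chain
+        #     START <- A <- END
+        # the graph below ends up as
+        #     {"END": {"A"}, "A": {"START"}, "START": set()}
+        # i.e. each node maps to the set of its prev_events.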
+ graph = {} + + # node_id -> FakeEvent + fake_event_map = {} + + for ev in itertools.chain(INITIAL_EVENTS, events): + graph[ev.node_id] = set() + fake_event_map[ev.node_id] = ev + + for a, b in pairwise(INITIAL_EDGES): + graph[a].add(b) + + for edge_list in edges: + for a, b in pairwise(edge_list): + graph[a].add(b) + + # event_id -> FrozenEvent + event_map = {} + # node_id -> state + state_at_event = {} + + # We copy the map as the sort consumes the graph + graph_copy = {k: set(v) for k, v in graph.items()} + + for node_id in lexicographical_topological_sort(graph_copy, key=lambda e: e): + fake_event = fake_event_map[node_id] + event_id = fake_event.event_id + + prev_events = list(graph[node_id]) + + if len(prev_events) == 0: + state_before = {} + elif len(prev_events) == 1: + state_before = dict(state_at_event[prev_events[0]]) + else: + state_d = resolve_events_with_factory( + [state_at_event[n] for n in prev_events], + event_map=event_map, + state_res_store=TestStateResolutionStore(event_map), + ) + + self.assertTrue(state_d.called) + state_before = state_d.result + + state_after = dict(state_before) + if fake_event.state_key is not None: + state_after[(fake_event.type, fake_event.state_key)] = event_id + + auth_types = set(auth_types_for_event(fake_event)) + + auth_events = [] + for key in auth_types: + if key in state_before: + auth_events.append(state_before[key]) + + event = fake_event.to_event(auth_events, prev_events) + + state_at_event[node_id] = state_after + event_map[event_id] = event + + expected_state = {} + for node_id in expected_state_ids: + # expected_state_ids are node IDs rather than event IDs, + # so we have to convert + event_id = EventID(node_id, "example.com").to_string() + event = event_map[event_id] + + key = (event.type, event.state_key) + + expected_state[key] = event_id + + start_state = state_at_event["START"] + end_state = { + key: value + for key, value in state_at_event["END"].items() + if key in expected_state or start_state.get(key) != value + } + + self.assertEqual(expected_state, end_state) + + +class LexicographicalTestCase(unittest.TestCase): + def test_simple(self): + graph = { + "l": {"o"}, + "m": {"n", "o"}, + "n": {"o"}, + "o": set(), + "p": {"o"}, + } + + res = list(lexicographical_topological_sort(graph, key=lambda x: x)) + + self.assertEqual(["o", "l", "n", "m", "p"], res) + + +def pairwise(iterable): + "s -> (s0,s1), (s1,s2), (s2, s3), ..." + a, b = itertools.tee(iterable) + next(b, None) + return zip(a, b) + + +@attr.s +class TestStateResolutionStore(object): + event_map = attr.ib() + + def get_events(self, event_ids, allow_rejected=False): + """Get events from the database + + Args: + event_ids (list): The event_ids of the events to fetch + allow_rejected (bool): If True return rejected events. + + Returns: + Deferred[dict[str, FrozenEvent]]: Dict from event_id to event. + """ + + return { + eid: self.event_map[eid] + for eid in event_ids + if eid in self.event_map + } + + def get_auth_chain(self, event_ids): + """Gets the full auth chain for a set of events (including rejected + events). + + Includes the given event IDs in the result. + + Note that: + 1. All events must be state events. + 2. For v1 rooms this may not have the full auth chain in the + presence of rejected events + + Args: + event_ids (list): The event IDs of the events to fetch the auth + chain for. Must be state events. + + Returns: + Deferred[list[str]]: List of event IDs of the auth chain. 
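+
+        (The implementation below is a simple depth-first walk over
+        auth_events.)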
+ """ + + # Simple DFS for auth chain + result = set() + stack = list(event_ids) + while stack: + event_id = stack.pop() + if event_id in result: + continue + + result.add(event_id) + + event = self.event_map[event_id] + for aid, _ in event.auth_events: + stack.append(aid) + + return list(result) -- cgit 1.4.1 From fc954960e91110ca1bb126657ebd04d5938193ca Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Sep 2018 15:45:03 +0100 Subject: Newsfile --- changelog.d/3786.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3786.misc diff --git a/changelog.d/3786.misc b/changelog.d/3786.misc new file mode 100644 index 0000000000..a9f9a2bb27 --- /dev/null +++ b/changelog.d/3786.misc @@ -0,0 +1 @@ +Add initial implementation of new state resolution algorithm -- cgit 1.4.1 From 15133477ee6ea624dd937329c41d2d3c8ff2bd56 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 16 Oct 2018 13:58:37 +0100 Subject: Fix up use of resolve_events_with_factory --- synapse/handlers/federation.py | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 45d955e6f5..b028d58ae4 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -53,7 +53,7 @@ from synapse.replication.http.federation import ( ReplicationFederationSendEventsRestServlet, ) from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet -from synapse.state import resolve_events_with_factory +from synapse.state import StateResolutionStore, resolve_events_with_factory from synapse.types import UserID, get_domain_from_id from synapse.util import logcontext, unwrapFirstError from synapse.util.async_helpers import Linearizer @@ -384,21 +384,18 @@ class FederationHandler(BaseHandler): for x in remote_state: event_map[x.event_id] = x - # Resolve any conflicting state - @defer.inlineCallbacks - def fetch(ev_ids): - fetched = yield self.store.get_events( - ev_ids, get_prev_content=False, check_redacted=False, - ) - # add any events we fetch here to the `event_map` so that we - # can use them to build the state event list below. - event_map.update(fetched) - defer.returnValue(fetched) - room_version = yield self.store.get_room_version(room_id) state_map = yield resolve_events_with_factory( - room_version, state_maps, event_map, fetch, + room_version, state_maps, event_map, + state_res_store=StateResolutionStore(self.store), + ) + + evs = yield self.store.get_events( + list(state_map.values()), + get_prev_content=False, + check_redacted=False, ) + event_map.update(evs) # we need to give _process_received_pdu the actual state events # rather than event ids, so generate that now. -- cgit 1.4.1 From 4a28d3d36fa21446e60a2a5142626a385e1f27af Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 16 Oct 2018 14:01:53 +0100 Subject: Update event_auth table for rejected events --- synapse/storage/events.py | 36 +++++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index fc88edcb39..c780f55277 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -851,6 +851,27 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore # Insert into event_to_state_groups. self._store_event_state_mappings_txn(txn, events_and_contexts) + # We want to store event_auth mappings for rejected events, as they're + # used in state res v2. 
+    # This is only necessary if the rejected event appears in an accepted
+    # event's auth chain, but it's easier for now just to store them (and
+    # it doesn't take much storage compared to storing the entire event
+    # anyway).
+    self._simple_insert_many_txn(
+        txn,
+        table="event_auth",
+        values=[
+            {
+                "event_id": event.event_id,
+                "room_id": event.room_id,
+                "auth_id": auth_id,
+            }
+            for event, _ in events_and_contexts
+            for auth_id, _ in event.auth_events
+            if event.is_state()
+        ],
+    )
+
     # _store_rejected_events_txn filters out any events which were
     # rejected, and returns the filtered list.
     events_and_contexts = self._store_rejected_events_txn(
@@ -1326,21 +1347,6 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
             txn, event.room_id, event.redacts
         )
 
-        self._simple_insert_many_txn(
-            txn,
-            table="event_auth",
-            values=[
-                {
-                    "event_id": event.event_id,
-                    "room_id": event.room_id,
-                    "auth_id": auth_id,
-                }
-                for event, _ in events_and_contexts
-                for auth_id, _ in event.auth_events
-                if event.is_state()
-            ],
-        )
-
         # Update the event_forward_extremities, event_backward_extremities and
         # event_edges tables.
         self._handle_mult_prev_events(
--
cgit 1.4.1


From 017eb9d17acff2fd6f053f6e500eda0e26cc22eb Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Tue, 16 Oct 2018 16:31:47 +0100
Subject: changelog

---
 changelog.d/4041.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/4041.misc

diff --git a/changelog.d/4041.misc b/changelog.d/4041.misc
new file mode 100644
index 0000000000..8cce9daac9
--- /dev/null
+++ b/changelog.d/4041.misc
@@ -0,0 +1 @@
+Run the CircleCI builds in docker containers
--
cgit 1.4.1


From 10405153c226b9aa2eccb0c4cf16de03cabde778 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Tue, 16 Oct 2018 16:47:26 +0100
Subject: Use wget rather than curl

The docker image doesn't have wget.
---
 .circleci/merge_base_branch.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.circleci/merge_base_branch.sh b/.circleci/merge_base_branch.sh
index 6b0bf3aa48..b2c8c40f4c 100755
--- a/.circleci/merge_base_branch.sh
+++ b/.circleci/merge_base_branch.sh
@@ -16,7 +16,7 @@ then
     GITBASE="develop"
 else
     # Get the reference, using the GitHub API
-    GITBASE=`curl -q https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`
+    GITBASE=`wget -O- https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`
 fi
 
 # Show what we are before
@@ -31,4 +31,4 @@ git fetch -u origin $GITBASE
 git merge --no-edit origin/$GITBASE
 
 # Show what we are after.
-git show -s
\ No newline at end of file
+git show -s
--
cgit 1.4.1


From fc0f13dd036cec4e41f5969d021d9dd10d6e5016 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Tue, 16 Oct 2018 20:37:16 +0100
Subject: Fix incorrect truncation in get_missing_events

It's quite important that get_missing_events returns the *latest* events in
the room; however we were pulling event ids out of the database until we got
*at least* 10, and then taking the *earliest* of the results.

We also shouldn't really be relying on depth, and should be checking the
room_id.
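As an illustrative sketch of the failure mode (the names `candidates` and
`limit` are hypothetical, not code from this patch):

    # ids collected by walking back from the latest events, newest first
    candidates = ["E10", "E9", "E8", "E7", "E6", "E5", "E4", "E3", "E2", "E1"]
    limit = 5

    # old behaviour: keep collecting, then sort and truncate, which keeps
    # the *earliest* events rather than the latest ones.
    # new behaviour: stop once `limit` ids are collected, then reverse so
    # the caller gets the latest events in roughly chronological order:
    result = list(reversed(candidates[:limit]))
    # -> ["E6", "E7", "E8", "E9", "E10"]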
--- changelog.d/4045.bugfix | 1 + synapse/federation/federation_server.py | 8 +++---- synapse/federation/transport/server.py | 2 -- synapse/handlers/federation.py | 12 +++++------ synapse/storage/event_federation.py | 38 ++++++++++++++------------------- 5 files changed, 26 insertions(+), 35 deletions(-) create mode 100644 changelog.d/4045.bugfix diff --git a/changelog.d/4045.bugfix b/changelog.d/4045.bugfix new file mode 100644 index 0000000000..fa50eb5aff --- /dev/null +++ b/changelog.d/4045.bugfix @@ -0,0 +1 @@ +Fix bug which made get_missing_events return too few events \ No newline at end of file diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 819e8f7331..4efe95faa4 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -507,19 +507,19 @@ class FederationServer(FederationBase): @defer.inlineCallbacks @log_function def on_get_missing_events(self, origin, room_id, earliest_events, - latest_events, limit, min_depth): + latest_events, limit): with (yield self._server_linearizer.queue((origin, room_id))): origin_host, _ = parse_server_name(origin) yield self.check_server_matches_acl(origin_host, room_id) logger.info( "on_get_missing_events: earliest_events: %r, latest_events: %r," - " limit: %d, min_depth: %d", - earliest_events, latest_events, limit, min_depth + " limit: %d", + earliest_events, latest_events, limit, ) missing_events = yield self.handler.on_get_missing_events( - origin, room_id, earliest_events, latest_events, limit, min_depth + origin, room_id, earliest_events, latest_events, limit, ) if len(missing_events) < 5: diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 2f874b4838..7288d49074 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -560,7 +560,6 @@ class FederationGetMissingEventsServlet(BaseFederationServlet): @defer.inlineCallbacks def on_POST(self, origin, content, query, room_id): limit = int(content.get("limit", 10)) - min_depth = int(content.get("min_depth", 0)) earliest_events = content.get("earliest_events", []) latest_events = content.get("latest_events", []) @@ -569,7 +568,6 @@ class FederationGetMissingEventsServlet(BaseFederationServlet): room_id=room_id, earliest_events=earliest_events, latest_events=latest_events, - min_depth=min_depth, limit=limit, ) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 45d955e6f5..cab57a8849 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -309,8 +309,8 @@ class FederationHandler(BaseHandler): if sent_to_us_directly: logger.warn( - "[%s %s] Failed to fetch %d prev events: rejecting", - room_id, event_id, len(prevs - seen), + "[%s %s] Rejecting: failed to fetch %d prev events: %s", + room_id, event_id, len(prevs - seen), shortstr(prevs - seen) ) raise FederationError( "ERROR", @@ -452,8 +452,8 @@ class FederationHandler(BaseHandler): latest |= seen logger.info( - "[%s %s]: Requesting %d prev_events: %s", - room_id, event_id, len(prevs - seen), shortstr(prevs - seen) + "[%s %s]: Requesting missing events between %s and %s", + room_id, event_id, shortstr(latest), event_id, ) # XXX: we set timeout to 10s to help workaround @@ -1852,7 +1852,7 @@ class FederationHandler(BaseHandler): @defer.inlineCallbacks def on_get_missing_events(self, origin, room_id, earliest_events, - latest_events, limit, min_depth): + latest_events, limit): in_room = yield 
self.auth.check_host_in_room( room_id, origin @@ -1861,14 +1861,12 @@ class FederationHandler(BaseHandler): raise AuthError(403, "Host not in room.") limit = min(limit, 20) - min_depth = max(min_depth, 0) missing_events = yield self.store.get_missing_events( room_id=room_id, earliest_events=earliest_events, latest_events=latest_events, limit=limit, - min_depth=min_depth, ) missing_events = yield filter_events_for_server( diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index 24345b20a6..3faca2a042 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -376,33 +376,25 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, @defer.inlineCallbacks def get_missing_events(self, room_id, earliest_events, latest_events, - limit, min_depth): + limit): ids = yield self.runInteraction( "get_missing_events", self._get_missing_events, - room_id, earliest_events, latest_events, limit, min_depth + room_id, earliest_events, latest_events, limit, ) - events = yield self._get_events(ids) - - events = sorted( - [ev for ev in events if ev.depth >= min_depth], - key=lambda e: e.depth, - ) - - defer.returnValue(events[:limit]) + defer.returnValue(events) def _get_missing_events(self, txn, room_id, earliest_events, latest_events, - limit, min_depth): - - earliest_events = set(earliest_events) - front = set(latest_events) - earliest_events + limit): - event_results = set() + seen_events = set(earliest_events) + front = set(latest_events) - seen_events + event_results = [] query = ( "SELECT prev_event_id FROM event_edges " - "WHERE event_id = ? AND is_state = ? " + "WHERE room_id = ? AND event_id = ? AND is_state = ? " "LIMIT ?" ) @@ -411,18 +403,20 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, for event_id in front: txn.execute( query, - (event_id, False, limit - len(event_results)) + (room_id, event_id, False, limit - len(event_results)) ) - for e_id, in txn: - new_front.add(e_id) + new_results = set(t[0] for t in txn) - seen_events - new_front -= earliest_events - new_front -= event_results + new_front |= new_results + seen_events |= new_results + event_results.extend(new_results) front = new_front - event_results |= new_front + # we built the list working backwards from latest_events; we now need to + # reverse it so that the events are approximately chronological. 
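+        # e.g. walking back from latest_events=[E3] may produce [E2, E1];
+        # reversing gives [E1, E2], i.e. oldest first.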
+        event_results.reverse()
 
         return event_results
--
cgit 1.4.1


From c6584f4b5f9cc495478e03e01f85fd2399cf6f8d Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Wed, 17 Oct 2018 11:36:41 +0100
Subject: clean up config error logic and imports

---
 changelog.d/3975.feature       |  2 +-
 synapse/config/registration.py |  9 ++++-----
 synapse/handlers/register.py   | 30 ++++++++++++++++--------------
 3 files changed, 21 insertions(+), 20 deletions(-)

diff --git a/changelog.d/3975.feature b/changelog.d/3975.feature
index 496ba4f4a0..79c2711fb4 100644
--- a/changelog.d/3975.feature
+++ b/changelog.d/3975.feature
@@ -1 +1 @@
-Servers with auto join rooms, should autocreate those rooms when first user registers
+Servers with auto-join rooms should automatically create those rooms when the first user registers
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index 43ff20a637..4b9bf6f2d1 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -15,11 +15,10 @@
 
 from distutils.util import strtobool
 
-from synapse.config._base import ConfigError
 from synapse.types import RoomAlias
 from synapse.util.stringutils import random_string_with_symbols
 
-from ._base import Config
+from ._base import Config, ConfigError
 
 
 class RegistrationConfig(Config):
@@ -48,7 +47,7 @@ class RegistrationConfig(Config):
         self.auto_join_rooms = config.get("auto_join_rooms", [])
         for room_alias in self.auto_join_rooms:
             if not RoomAlias.is_valid(room_alias):
-                raise ConfigError('Invalid auto_join_rooms entry %s' % room_alias)
+                raise ConfigError('Invalid auto_join_rooms entry %s' % (room_alias,))
         self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True)
 
     def default_config(self, **kwargs):
@@ -106,10 +105,10 @@ class RegistrationConfig(Config):
         #   - "#example:example.com"
 
         # Where auto_join_rooms are specified, setting this flag ensures that the
-        # the rooms exists by creating them when the first user on the
+        # rooms exist by creating them when the first user on the
         # homeserver registers.
         # Setting to false means that if the rooms are not manually created,
-        # users cannot be auto joined since they do not exist.
+        # users cannot be auto-joined since they do not exist.
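+        # For example, with the alias list shown above, the first registration
+        # on a fresh homeserver would create "#example:example.com" as a
+        # public room.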
autocreate_auto_join_rooms: true
         """ % locals()
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 2f7bdb0a20..1b5873c8d7 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -26,7 +26,6 @@ from synapse.api.errors import (
     RegistrationError,
     SynapseError,
 )
-from synapse.config._base import ConfigError
 from synapse.http.client import CaptchaServerHttpClient
 from synapse.types import RoomAlias, RoomID, UserID, create_requester
 from synapse.util.async_helpers import Linearizer
@@ -51,6 +50,7 @@ class RegistrationHandler(BaseHandler):
         self._auth_handler = hs.get_auth_handler()
         self.profile_handler = hs.get_profile_handler()
         self.user_directory_handler = hs.get_user_directory_handler()
+        self.room_creation_handler = self.hs.get_room_creation_handler()
         self.captcha_client = CaptchaServerHttpClient(hs)
 
         self._next_generated_user_id = None
@@ -231,21 +231,23 @@ class RegistrationHandler(BaseHandler):
         for r in self.hs.config.auto_join_rooms:
             try:
                 if should_auto_create_rooms:
-                    room_creation_handler = self.hs.get_room_creation_handler()
                     if self.hs.hostname != RoomAlias.from_string(r).domain:
-                        raise ConfigError(
-                            'Cannot create room alias %s, it does not match server domain'
+                        logger.warn(
+                            'Cannot create room alias %s, '
+                            'it does not match server domain' % (r,)
+                        )
+                        raise SynapseError(
+                            400, 'Room alias %s does not match server domain' % (r,)
+                        )
+                    else:
+                        # create_room expects the localpart of the room alias
+                        room_alias_localpart = RoomAlias.from_string(r).localpart
+                        yield self.room_creation_handler.create_room(
+                            fake_requester,
+                            config={
+                                "preset": "public_chat",
+                                "room_alias_name": room_alias_localpart
+                            },
+                            ratelimit=False,
                         )
-                    # create room expects the localpart of the room alias
-                    room_alias_localpart = RoomAlias.from_string(r).localpart
-                    yield room_creation_handler.create_room(
-                        fake_requester,
-                        config={
-                            "preset": "public_chat",
-                            "room_alias_name": room_alias_localpart
-                        },
-                        ratelimit=False,
-                    )
                 else:
                     yield self._join_user_to_room(fake_requester, r)
             except Exception as e:
--
cgit 1.4.1


From d6a7797dd1d76a86e6914c2ae562fa83eee4c57f Mon Sep 17 00:00:00 2001
From: Will Hunt
Date: Wed, 17 Oct 2018 13:04:55 +0100
Subject: Fix roomlist since tokens on Python 3 (#4046)

Thanks @Half-Shot !!!
---
 changelog.d/4046.bugfix         |  1 +
 synapse/handlers/room_list.py   | 11 +++++++++--
 synapse/python_dependencies.py  |  2 +-
 tests/handlers/test_roomlist.py | 39 +++++++++++++++++++++++++++++++++++++++
 4 files changed, 50 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/4046.bugfix
 create mode 100644 tests/handlers/test_roomlist.py

diff --git a/changelog.d/4046.bugfix b/changelog.d/4046.bugfix
new file mode 100644
index 0000000000..5046dd1ce3
--- /dev/null
+++ b/changelog.d/4046.bugfix
@@ -0,0 +1 @@
+Fix issue where Python 3 users couldn't paginate /publicRooms
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index 38e1737ec9..dc88620885 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -16,7 +16,7 @@
 import logging
 from collections import namedtuple
 
-from six import iteritems
+from six import PY3, iteritems
 from six.moves import range
 
 import msgpack
@@ -444,9 +444,16 @@ class RoomListNextBatch(namedtuple("RoomListNextBatch", (
 
     @classmethod
     def from_token(cls, token):
+        if PY3:
+            # The argument raw=False is only available on new versions of
+            # msgpack, and only really needed on Python 3. Gate it behind
+            # a PY3 check to avoid causing issues on Debian-packaged versions.
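+            # For instance (a sketch of the Python 3 behaviour being worked
+            # around; not code from this patch):
+            #     msgpack.loads(msgpack.dumps({"key": 1}))            -> {b"key": 1}
+            #     msgpack.loads(msgpack.dumps({"key": 1}), raw=False) -> {"key": 1}
+            # Without raw=False the keys come back as bytes, which breaks the
+            # RoomListNextBatch(**kwargs) expansion below.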
+ decoded = msgpack.loads(decode_base64(token), raw=False) + else: + decoded = msgpack.loads(decode_base64(token)) return RoomListNextBatch(**{ cls.REVERSE_KEY_DICT[key]: val - for key, val in msgpack.loads(decode_base64(token)).items() + for key, val in decoded.items() }) def to_token(self): diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 2947f37f1a..f51184b50d 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -55,7 +55,7 @@ REQUIREMENTS = { "sortedcontainers>=1.4.4": ["sortedcontainers"], "pysaml2>=3.0.0": ["saml2"], "pymacaroons-pynacl>=0.9.3": ["pymacaroons"], - "msgpack-python>=0.3.0": ["msgpack"], + "msgpack-python>=0.4.2": ["msgpack"], "phonenumbers>=8.2.0": ["phonenumbers"], "six>=1.10": ["six"], diff --git a/tests/handlers/test_roomlist.py b/tests/handlers/test_roomlist.py new file mode 100644 index 0000000000..61eebb6985 --- /dev/null +++ b/tests/handlers/test_roomlist.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.handlers.room_list import RoomListNextBatch + +import tests.unittest +import tests.utils + + +class RoomListTestCase(tests.unittest.TestCase): + """ Tests RoomList's RoomListNextBatch. 
""" + + def setUp(self): + pass + + def test_check_read_batch_tokens(self): + batch_token = RoomListNextBatch( + stream_ordering="abcdef", + public_room_stream_id="123", + current_limit=20, + direction_is_forward=True, + ).to_token() + next_batch = RoomListNextBatch.from_token(batch_token) + self.assertEquals(next_batch.stream_ordering, "abcdef") + self.assertEquals(next_batch.public_room_stream_id, "123") + self.assertEquals(next_batch.current_limit, 20) + self.assertEquals(next_batch.direction_is_forward, True) -- cgit 1.4.1 From df33c164de7d73323814ac439a394173d662e366 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 17 Oct 2018 13:46:08 +0100 Subject: Only colourise synctl output when attached to tty --- synctl | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/synctl b/synctl index 09b64459b1..111b9c1816 100755 --- a/synctl +++ b/synctl @@ -48,7 +48,12 @@ def pid_running(pid): def write(message, colour=NORMAL, stream=sys.stdout): - if colour == NORMAL: + # Lets check if we're writing to a TTY before colouring + should_colour = False + if stream in (sys.stdout, sys.stderr): + should_colour = stream.isatty() + + if not should_colour or colour == NORMAL: stream.write(message + "\n") else: stream.write(colour + message + NORMAL + "\n") -- cgit 1.4.1 From 1af16acd4ca40b15183ae0a25ffcbc13c36c130d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 17 Oct 2018 13:48:09 +0100 Subject: Newsfile --- changelog.d/4049.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4049.misc diff --git a/changelog.d/4049.misc b/changelog.d/4049.misc new file mode 100644 index 0000000000..4370d9dfa6 --- /dev/null +++ b/changelog.d/4049.misc @@ -0,0 +1 @@ +Only colourise synctl output when attached to tty -- cgit 1.4.1 From f6a0a02a62db1a3030e5563d6454d01c4a76c38d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 17 Oct 2018 16:09:34 +0100 Subject: Fix bug where we raised StopIteration in a generator This made python 3.7 unhappy --- synapse/rest/media/v1/preview_url_resource.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index af01040a38..8c892ff187 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -596,10 +596,13 @@ def _iterate_over_text(tree, *tags_to_ignore): # to be returned. elements = iter([tree]) while True: - el = next(elements) + el = next(elements, None) + if el is None: + return + if isinstance(el, string_types): yield el - elif el is not None and el.tag not in tags_to_ignore: + elif el.tag not in tags_to_ignore: # el.text is the text before the first child, so we can immediately # return it if the text exists. 
if el.text:
--
cgit 1.4.1


From 3a5d8d5891cc66658c40a796fcf9cf3f0f2e0916 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 17 Oct 2018 16:12:45 +0100
Subject: Newsfile

---
 changelog.d/4050.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/4050.bugfix

diff --git a/changelog.d/4050.bugfix b/changelog.d/4050.bugfix
new file mode 100644
index 0000000000..3d1f6af847
--- /dev/null
+++ b/changelog.d/4050.bugfix
@@ -0,0 +1 @@
+Fix URL previewing to work in Python 3.7
--
cgit 1.4.1


From 1519572961d22aeaacbe9fccae7788f0f9e72b1c Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Wed, 17 Oct 2018 15:44:34 +0100
Subject: Ship the email templates as package_data

Move the example email templates into the synapse package so that they can
be used as package data, which should mean that all of the packaging
mechanisms (pip, docker, debian, arch, etc) should now come with the example
templates.

In order to grandfather in people who relied on the templates being in the
old place, check for that situation and fall back to using the defaults if
the templates directory does not exist.
---
 MANIFEST.in                           |   2 +-
 changelog.d/4052.feature              |  12 +++
 docker/conf/homeserver.yaml           |   4 +-
 res/templates/mail-Vector.css         |   7 --
 res/templates/mail.css                | 156 ----------------------------
 res/templates/notif.html              |  45 ---------
 res/templates/notif.txt               |  16 ----
 res/templates/notif_mail.html         |  55 ------------
 res/templates/notif_mail.txt          |  10 ---
 res/templates/room.html               |  33 -------
 res/templates/room.txt                |   9 --
 synapse/config/emailconfig.py         |  33 ++++++-
 synapse/push/mailer.py                |   5 +-
 synapse/res/templates/mail-Vector.css |   7 ++
 synapse/res/templates/mail.css        | 156 ++++++++++++++++++++++++++++
 synapse/res/templates/notif.html      |  45 ++++++++++
 synapse/res/templates/notif.txt       |  16 ++++
 synapse/res/templates/notif_mail.html |  55 ++++++++++++
 synapse/res/templates/notif_mail.txt  |  10 +++
 synapse/res/templates/room.html       |  33 +++++++
 synapse/res/templates/room.txt        |   9 ++
 21 files changed, 381 insertions(+), 337 deletions(-)
 create mode 100644 changelog.d/4052.feature
 delete mode 100644 res/templates/mail-Vector.css
 delete mode 100644 res/templates/mail.css
 delete mode 100644 res/templates/notif.html
 delete mode 100644 res/templates/notif.txt
 delete mode 100644 res/templates/notif_mail.html
 delete mode 100644 res/templates/notif_mail.txt
 delete mode 100644 res/templates/room.html
 delete mode 100644 res/templates/room.txt
 create mode 100644 synapse/res/templates/mail-Vector.css
 create mode 100644 synapse/res/templates/mail.css
 create mode 100644 synapse/res/templates/notif.html
 create mode 100644 synapse/res/templates/notif.txt
 create mode 100644 synapse/res/templates/notif_mail.html
 create mode 100644 synapse/res/templates/notif_mail.txt
 create mode 100644 synapse/res/templates/room.html
 create mode 100644 synapse/res/templates/room.txt

diff --git a/MANIFEST.in b/MANIFEST.in
index c6a37ac685..25cdf0a61b 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -12,12 +12,12 @@ recursive-include synapse/storage/schema *.sql
 recursive-include synapse/storage/schema *.py
 
 recursive-include docs *
-recursive-include res *
 recursive-include scripts *
 recursive-include scripts-dev *
 recursive-include synapse *.pyi
 recursive-include tests *.py
 
+recursive-include synapse/res *
 recursive-include synapse/static *.css
 recursive-include synapse/static *.gif
 recursive-include synapse/static *.html
diff --git a/changelog.d/4052.feature b/changelog.d/4052.feature
new file mode 100644
index 0000000000..0c01f06880
---
/dev/null +++ b/changelog.d/4052.feature @@ -0,0 +1,12 @@ +Ship the example email templates as part of the package + +**Note**: if you deploy your Synapse instance from a git checkout or a github +snapshot URL, then this means that the example email templates will no longer +be installed in `res/templates`. If you have email notifications enabled, you +should ensure that `email.template_dir` is either configured to point at a +directory where you have installed customised templates, or leave it unset to +use the default templates. + +The configuration parser will try to detect the situation where +`email.template_dir` is incorrectly set to `res/templates` and do the right +thing, but will warn about this. \ No newline at end of file diff --git a/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml index cfe88788f2..a38b929f50 100644 --- a/docker/conf/homeserver.yaml +++ b/docker/conf/homeserver.yaml @@ -211,7 +211,9 @@ email: require_transport_security: False notif_from: "{{ SYNAPSE_SMTP_FROM or "hostmaster@" + SYNAPSE_SERVER_NAME }}" app_name: Matrix - template_dir: res/templates + # if template_dir is unset, uses the example templates that are part of + # the Synapse distribution. + #template_dir: res/templates notif_template_html: notif_mail.html notif_template_text: notif_mail.txt notif_for_new_users: True diff --git a/res/templates/mail-Vector.css b/res/templates/mail-Vector.css deleted file mode 100644 index 6a3e36eda1..0000000000 --- a/res/templates/mail-Vector.css +++ /dev/null @@ -1,7 +0,0 @@ -.header { - border-bottom: 4px solid #e4f7ed ! important; -} - -.notif_link a, .footer a { - color: #76CFA6 ! important; -} diff --git a/res/templates/mail.css b/res/templates/mail.css deleted file mode 100644 index 5ab3e1b06d..0000000000 --- a/res/templates/mail.css +++ /dev/null @@ -1,156 +0,0 @@ -body { - margin: 0px; -} - -pre, code { - word-break: break-word; - white-space: pre-wrap; -} - -#page { - font-family: 'Open Sans', Helvetica, Arial, Sans-Serif; - font-color: #454545; - font-size: 12pt; - width: 100%; - padding: 20px; -} - -#inner { - width: 640px; -} - -.header { - width: 100%; - height: 87px; - color: #454545; - border-bottom: 4px solid #e5e5e5; -} - -.logo { - text-align: right; - margin-left: 20px; -} - -.salutation { - padding-top: 10px; - font-weight: bold; -} - -.summarytext { -} - -.room { - width: 100%; - color: #454545; - border-bottom: 1px solid #e5e5e5; -} - -.room_header td { - padding-top: 38px; - padding-bottom: 10px; - border-bottom: 1px solid #e5e5e5; -} - -.room_name { - vertical-align: middle; - font-size: 18px; - font-weight: bold; -} - -.room_header h2 { - margin-top: 0px; - margin-left: 75px; - font-size: 20px; -} - -.room_avatar { - width: 56px; - line-height: 0px; - text-align: center; - vertical-align: middle; -} - -.room_avatar img { - width: 48px; - height: 48px; - object-fit: cover; - border-radius: 24px; -} - -.notif { - border-bottom: 1px solid #e5e5e5; - margin-top: 16px; - padding-bottom: 16px; -} - -.historical_message .sender_avatar { - opacity: 0.3; -} - -/* spell out opacity and historical_message class names for Outlook aka Word */ -.historical_message .sender_name { - color: #e3e3e3; -} - -.historical_message .message_time { - color: #e3e3e3; -} - -.historical_message .message_body { - color: #c7c7c7; -} - -.historical_message td, -.message td { - padding-top: 10px; -} - -.sender_avatar { - width: 56px; - text-align: center; - vertical-align: top; -} - -.sender_avatar img { - margin-top: -2px; - width: 32px; - height: 32px; - 
border-radius: 16px; -} - -.sender_name { - display: inline; - font-size: 13px; - color: #a2a2a2; -} - -.message_time { - text-align: right; - width: 100px; - font-size: 11px; - color: #a2a2a2; -} - -.message_body { -} - -.notif_link td { - padding-top: 10px; - padding-bottom: 10px; - font-weight: bold; -} - -.notif_link a, .footer a { - color: #454545; - text-decoration: none; -} - -.debug { - font-size: 10px; - color: #888; -} - -.footer { - margin-top: 20px; - text-align: center; -} \ No newline at end of file diff --git a/res/templates/notif.html b/res/templates/notif.html deleted file mode 100644 index 88b921ca9c..0000000000 --- a/res/templates/notif.html +++ /dev/null @@ -1,45 +0,0 @@ -{% for message in notif.messages %} - - - {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} - {% if message.sender_avatar_url %} - - {% else %} - {% if message.sender_hash % 3 == 0 %} - - {% elif message.sender_hash % 3 == 1 %} - - {% else %} - - {% endif %} - {% endif %} - {% endif %} - - - {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} -
{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}
- {% endif %} -
- {% if message.msgtype == "m.text" %} - {{ message.body_text_html }} - {% elif message.msgtype == "m.emote" %} - {{ message.body_text_html }} - {% elif message.msgtype == "m.notice" %} - {{ message.body_text_html }} - {% elif message.msgtype == "m.image" %} - - {% elif message.msgtype == "m.file" %} - {{ message.body_text_plain }} - {% endif %} -
- - {{ message.ts|format_ts("%H:%M") }} - -{% endfor %} - - - - View {{ room.title }} - - - diff --git a/res/templates/notif.txt b/res/templates/notif.txt deleted file mode 100644 index a37bee9833..0000000000 --- a/res/templates/notif.txt +++ /dev/null @@ -1,16 +0,0 @@ -{% for message in notif.messages %} -{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }}) -{% if message.msgtype == "m.text" %} -{{ message.body_text_plain }} -{% elif message.msgtype == "m.emote" %} -{{ message.body_text_plain }} -{% elif message.msgtype == "m.notice" %} -{{ message.body_text_plain }} -{% elif message.msgtype == "m.image" %} -{{ message.body_text_plain }} -{% elif message.msgtype == "m.file" %} -{{ message.body_text_plain }} -{% endif %} -{% endfor %} - -View {{ room.title }} at {{ notif.link }} diff --git a/res/templates/notif_mail.html b/res/templates/notif_mail.html deleted file mode 100644 index fcdb3109fe..0000000000 --- a/res/templates/notif_mail.html +++ /dev/null @@ -1,55 +0,0 @@ - - - - - - - - - - - - -
- - - - - -
-
Hi {{ user_display_name }},
-
{{ summary_text }}
-
- {% for room in rooms %} - {% include 'room.html' with context %} - {% endfor %} - -
- - diff --git a/res/templates/notif_mail.txt b/res/templates/notif_mail.txt deleted file mode 100644 index 24843042a5..0000000000 --- a/res/templates/notif_mail.txt +++ /dev/null @@ -1,10 +0,0 @@ -Hi {{ user_display_name }}, - -{{ summary_text }} - -{% for room in rooms %} -{% include 'room.txt' with context %} -{% endfor %} - -You can disable these notifications at {{ unsubscribe_link }} - diff --git a/res/templates/room.html b/res/templates/room.html deleted file mode 100644 index 723c222d25..0000000000 --- a/res/templates/room.html +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - {% if room.invite %} - - - - - - {% else %} - {% for notif in room.notifs %} - {% include 'notif.html' with context %} - {% endfor %} - {% endif %} -
- {% if room.avatar_url %} - - {% else %} - {% if room.hash % 3 == 0 %} - - {% elif room.hash % 3 == 1 %} - - {% else %} - - {% endif %} - {% endif %} - - {{ room.title }} -
- Join the conversation. -
diff --git a/res/templates/room.txt b/res/templates/room.txt deleted file mode 100644 index 84648c710e..0000000000 --- a/res/templates/room.txt +++ /dev/null @@ -1,9 +0,0 @@ -{{ room.title }} - -{% if room.invite %} - You've been invited, join at {{ room.link }} -{% else %} - {% for notif in room.notifs %} - {% include 'notif.txt' with context %} - {% endfor %} -{% endif %} diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index fe156b6930..862f92c6ae 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -13,11 +13,19 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import print_function + # This file can't be called email.py because if it is, we cannot: import email.utils +import logging +import os +import sys +import textwrap from ._base import Config +logger = logging.getLogger(__name__) + class EmailConfig(Config): def read_config(self, config): @@ -38,7 +46,6 @@ class EmailConfig(Config): "smtp_host", "smtp_port", "notif_from", - "template_dir", "notif_template_html", "notif_template_text", ] @@ -62,9 +69,27 @@ class EmailConfig(Config): self.email_smtp_host = email_config["smtp_host"] self.email_smtp_port = email_config["smtp_port"] self.email_notif_from = email_config["notif_from"] - self.email_template_dir = email_config["template_dir"] self.email_notif_template_html = email_config["notif_template_html"] self.email_notif_template_text = email_config["notif_template_text"] + + self.email_template_dir = email_config.get("template_dir") + + # backwards-compatibility hack + if ( + self.email_template_dir == "res/templates" + and not os.path.isfile( + os.path.join(self.email_template_dir, self.email_notif_template_text) + ) + ): + t = """\ +WARNING: The email notifier is configured to look for templates in '%s', but no templates +could be found there. We will fall back to using the example templates; to get rid of this +warning, leave 'email.template_dir' unset. +""" % (self.email_template_dir,) + + print(textwrap.fill(t, width=80) + "\n", file=sys.stderr) + self.email_template_dir = None + self.email_notif_for_new_users = email_config.get( "notif_for_new_users", True ) @@ -113,7 +138,9 @@ class EmailConfig(Config): # require_transport_security: False # notif_from: "Your Friendly %(app)s Home Server " # app_name: Matrix - # template_dir: res/templates + # # if template_dir is unset, uses the example templates that are part of + # # the Synapse distribution. + # #template_dir: res/templates # notif_template_html: notif_mail.html # notif_template_text: notif_mail.txt # notif_for_new_users: True diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 1a5a10d974..b9dcfee740 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -528,7 +528,10 @@ def load_jinja2_templates(config): """ logger.info("loading jinja2") - loader = jinja2.FileSystemLoader(config.email_template_dir) + if config.email_template_dir: + loader = jinja2.FileSystemLoader(config.email_template_dir) + else: + loader = jinja2.PackageLoader('synapse', 'res/templates') env = jinja2.Environment(loader=loader) env.filters["format_ts"] = format_ts_filter env.filters["mxc_to_http"] = _create_mxc_to_http_filter(config) diff --git a/synapse/res/templates/mail-Vector.css b/synapse/res/templates/mail-Vector.css new file mode 100644 index 0000000000..6a3e36eda1 --- /dev/null +++ b/synapse/res/templates/mail-Vector.css @@ -0,0 +1,7 @@ +.header { + border-bottom: 4px solid #e4f7ed ! 
important; +} + +.notif_link a, .footer a { + color: #76CFA6 ! important; +} diff --git a/synapse/res/templates/mail.css b/synapse/res/templates/mail.css new file mode 100644 index 0000000000..5ab3e1b06d --- /dev/null +++ b/synapse/res/templates/mail.css @@ -0,0 +1,156 @@ +body { + margin: 0px; +} + +pre, code { + word-break: break-word; + white-space: pre-wrap; +} + +#page { + font-family: 'Open Sans', Helvetica, Arial, Sans-Serif; + font-color: #454545; + font-size: 12pt; + width: 100%; + padding: 20px; +} + +#inner { + width: 640px; +} + +.header { + width: 100%; + height: 87px; + color: #454545; + border-bottom: 4px solid #e5e5e5; +} + +.logo { + text-align: right; + margin-left: 20px; +} + +.salutation { + padding-top: 10px; + font-weight: bold; +} + +.summarytext { +} + +.room { + width: 100%; + color: #454545; + border-bottom: 1px solid #e5e5e5; +} + +.room_header td { + padding-top: 38px; + padding-bottom: 10px; + border-bottom: 1px solid #e5e5e5; +} + +.room_name { + vertical-align: middle; + font-size: 18px; + font-weight: bold; +} + +.room_header h2 { + margin-top: 0px; + margin-left: 75px; + font-size: 20px; +} + +.room_avatar { + width: 56px; + line-height: 0px; + text-align: center; + vertical-align: middle; +} + +.room_avatar img { + width: 48px; + height: 48px; + object-fit: cover; + border-radius: 24px; +} + +.notif { + border-bottom: 1px solid #e5e5e5; + margin-top: 16px; + padding-bottom: 16px; +} + +.historical_message .sender_avatar { + opacity: 0.3; +} + +/* spell out opacity and historical_message class names for Outlook aka Word */ +.historical_message .sender_name { + color: #e3e3e3; +} + +.historical_message .message_time { + color: #e3e3e3; +} + +.historical_message .message_body { + color: #c7c7c7; +} + +.historical_message td, +.message td { + padding-top: 10px; +} + +.sender_avatar { + width: 56px; + text-align: center; + vertical-align: top; +} + +.sender_avatar img { + margin-top: -2px; + width: 32px; + height: 32px; + border-radius: 16px; +} + +.sender_name { + display: inline; + font-size: 13px; + color: #a2a2a2; +} + +.message_time { + text-align: right; + width: 100px; + font-size: 11px; + color: #a2a2a2; +} + +.message_body { +} + +.notif_link td { + padding-top: 10px; + padding-bottom: 10px; + font-weight: bold; +} + +.notif_link a, .footer a { + color: #454545; + text-decoration: none; +} + +.debug { + font-size: 10px; + color: #888; +} + +.footer { + margin-top: 20px; + text-align: center; +} \ No newline at end of file diff --git a/synapse/res/templates/notif.html b/synapse/res/templates/notif.html new file mode 100644 index 0000000000..88b921ca9c --- /dev/null +++ b/synapse/res/templates/notif.html @@ -0,0 +1,45 @@ +{% for message in notif.messages %} + + + {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} + {% if message.sender_avatar_url %} + + {% else %} + {% if message.sender_hash % 3 == 0 %} + + {% elif message.sender_hash % 3 == 1 %} + + {% else %} + + {% endif %} + {% endif %} + {% endif %} + + + {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} +
{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}
+ {% endif %}
+
+ {% if message.msgtype == "m.text" %}
+ {{ message.body_text_html }}
+ {% elif message.msgtype == "m.emote" %}
+ {{ message.body_text_html }}
+ {% elif message.msgtype == "m.notice" %}
+ {{ message.body_text_html }}
+ {% elif message.msgtype == "m.image" %}
+
+ {% elif message.msgtype == "m.file" %}
+ {{ message.body_text_plain }}
+ {% endif %}
+
+ + {{ message.ts|format_ts("%H:%M") }} + +{% endfor %} + + + + View {{ room.title }} + + + diff --git a/synapse/res/templates/notif.txt b/synapse/res/templates/notif.txt new file mode 100644 index 0000000000..a37bee9833 --- /dev/null +++ b/synapse/res/templates/notif.txt @@ -0,0 +1,16 @@ +{% for message in notif.messages %} +{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }}) +{% if message.msgtype == "m.text" %} +{{ message.body_text_plain }} +{% elif message.msgtype == "m.emote" %} +{{ message.body_text_plain }} +{% elif message.msgtype == "m.notice" %} +{{ message.body_text_plain }} +{% elif message.msgtype == "m.image" %} +{{ message.body_text_plain }} +{% elif message.msgtype == "m.file" %} +{{ message.body_text_plain }} +{% endif %} +{% endfor %} + +View {{ room.title }} at {{ notif.link }} diff --git a/synapse/res/templates/notif_mail.html b/synapse/res/templates/notif_mail.html new file mode 100644 index 0000000000..fcdb3109fe --- /dev/null +++ b/synapse/res/templates/notif_mail.html @@ -0,0 +1,55 @@ + + + + + + + + + + + + +
+
+
+
+
+
+
+ Hi {{ user_display_name }},
+ {{ summary_text }}
+
+ {% for room in rooms %}
+ {% include 'room.html' with context %}
+ {% endfor %}
+
+
+ + diff --git a/synapse/res/templates/notif_mail.txt b/synapse/res/templates/notif_mail.txt new file mode 100644 index 0000000000..24843042a5 --- /dev/null +++ b/synapse/res/templates/notif_mail.txt @@ -0,0 +1,10 @@ +Hi {{ user_display_name }}, + +{{ summary_text }} + +{% for room in rooms %} +{% include 'room.txt' with context %} +{% endfor %} + +You can disable these notifications at {{ unsubscribe_link }} + diff --git a/synapse/res/templates/room.html b/synapse/res/templates/room.html new file mode 100644 index 0000000000..723c222d25 --- /dev/null +++ b/synapse/res/templates/room.html @@ -0,0 +1,33 @@ + + + + + + {% if room.invite %} + + + + + + {% else %} + {% for notif in room.notifs %} + {% include 'notif.html' with context %} + {% endfor %} + {% endif %} +
+ {% if room.avatar_url %}
+ {% else %}
+ {% if room.hash % 3 == 0 %}
+ {% elif room.hash % 3 == 1 %}
+ {% else %}
+ {% endif %}
+ {% endif %}
+
+ {{ room.title }}
+ Join the conversation.
diff --git a/synapse/res/templates/room.txt b/synapse/res/templates/room.txt new file mode 100644 index 0000000000..84648c710e --- /dev/null +++ b/synapse/res/templates/room.txt @@ -0,0 +1,9 @@ +{{ room.title }} + +{% if room.invite %} + You've been invited, join at {{ room.link }} +{% else %} + {% for notif in room.notifs %} + {% include 'notif.txt' with context %} + {% endfor %} +{% endif %} -- cgit 1.4.1 From c8f2c199910b70a5d94acf5597a4f3ba94cd3969 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 17 Oct 2018 16:56:22 +0100 Subject: Put the warning blob at the top of the file --- synapse/config/emailconfig.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 862f92c6ae..e2582cfecc 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -26,6 +26,12 @@ from ._base import Config logger = logging.getLogger(__name__) +TEMPLATE_DIR_WARNING = """\ +WARNING: The email notifier is configured to look for templates in '%(template_dir)s', +but no templates could be found there. We will fall back to using the example templates; +to get rid of this warning, leave 'email.template_dir' unset. +""" + class EmailConfig(Config): def read_config(self, config): @@ -81,12 +87,9 @@ class EmailConfig(Config): os.path.join(self.email_template_dir, self.email_notif_template_text) ) ): - t = """\ -WARNING: The email notifier is configured to look for templates in '%s', but no templates -could be found there. We will fall back to using the example templates; to get rid of this -warning, leave 'email.template_dir' unset. -""" % (self.email_template_dir,) - + t = TEMPLATE_DIR_WARNING % { + "template_dir": self.email_template_dir, + } print(textwrap.fill(t, width=80) + "\n", file=sys.stderr) self.email_template_dir = None -- cgit 1.4.1 From a5aea15a6b0f4c389a243bbb3ac375c9ed128146 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 17 Oct 2018 17:06:49 +0100 Subject: Assume isatty is always defined, and catch AttributeError. Also don't bother checking colour==Normal --- synctl | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/synctl b/synctl index 111b9c1816..4bd4c68b37 100755 --- a/synctl +++ b/synctl @@ -50,10 +50,14 @@ def pid_running(pid): def write(message, colour=NORMAL, stream=sys.stdout): # Lets check if we're writing to a TTY before colouring should_colour = False - if stream in (sys.stdout, sys.stderr): + try: should_colour = stream.isatty() + except AttributeError: + # Just in case `isatty` isn't defined on everything. The python + # docs are incredibly vague. + pass - if not should_colour or colour == NORMAL: + if not should_colour: stream.write(message + "\n") else: stream.write(colour + message + NORMAL + "\n") -- cgit 1.4.1 From 0fd2321629f4a06bec8b552299d3ced32a7a021c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 16 Oct 2018 20:37:16 +0100 Subject: Fix incorrect truncation in get_missing_events It's quite important that get_missing_events returns the *latest* events in the room; however we were pulling event ids out of the database until we got *at least* 10, and then taking the *earliest* of the results. We also shouldn't really be relying on depth, and should be checking the room_id. 
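As an illustration of the corrected traversal (a sketch only: `get_prev_event_ids` is a hypothetical stand-in for the `event_edges` lookup, and the real code batches the queries), the walk starts from `latest_events` and works backwards, so that the limit keeps the latest events instead of truncating to the earliest:

    def get_missing_events_sketch(get_prev_event_ids, earliest_events,
                                  latest_events, limit):
        # Events the requester already has; never walk past these.
        seen = set(earliest_events)
        front = set(latest_events) - seen
        results = []

        while front and len(results) < limit:
            new_front = set()
            for event_id in front:
                for prev_id in get_prev_event_ids(event_id):
                    if prev_id not in seen and len(results) < limit:
                        seen.add(prev_id)
                        new_front.add(prev_id)
                        results.append(prev_id)
            front = new_front

        # The list was built newest-first; reverse it so the events come
        # out in roughly chronological order.
        results.reverse()
        return results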
--- changelog.d/4045.bugfix | 1 + synapse/federation/federation_server.py | 8 +++---- synapse/federation/transport/server.py | 2 -- synapse/handlers/federation.py | 12 +++++------ synapse/storage/event_federation.py | 38 ++++++++++++++------------------- 5 files changed, 26 insertions(+), 35 deletions(-) create mode 100644 changelog.d/4045.bugfix diff --git a/changelog.d/4045.bugfix b/changelog.d/4045.bugfix new file mode 100644 index 0000000000..fa50eb5aff --- /dev/null +++ b/changelog.d/4045.bugfix @@ -0,0 +1 @@ +Fix bug which made get_missing_events return too few events \ No newline at end of file diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 819e8f7331..4efe95faa4 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -507,19 +507,19 @@ class FederationServer(FederationBase): @defer.inlineCallbacks @log_function def on_get_missing_events(self, origin, room_id, earliest_events, - latest_events, limit, min_depth): + latest_events, limit): with (yield self._server_linearizer.queue((origin, room_id))): origin_host, _ = parse_server_name(origin) yield self.check_server_matches_acl(origin_host, room_id) logger.info( "on_get_missing_events: earliest_events: %r, latest_events: %r," - " limit: %d, min_depth: %d", - earliest_events, latest_events, limit, min_depth + " limit: %d", + earliest_events, latest_events, limit, ) missing_events = yield self.handler.on_get_missing_events( - origin, room_id, earliest_events, latest_events, limit, min_depth + origin, room_id, earliest_events, latest_events, limit, ) if len(missing_events) < 5: diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 2f874b4838..7288d49074 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -560,7 +560,6 @@ class FederationGetMissingEventsServlet(BaseFederationServlet): @defer.inlineCallbacks def on_POST(self, origin, content, query, room_id): limit = int(content.get("limit", 10)) - min_depth = int(content.get("min_depth", 0)) earliest_events = content.get("earliest_events", []) latest_events = content.get("latest_events", []) @@ -569,7 +568,6 @@ class FederationGetMissingEventsServlet(BaseFederationServlet): room_id=room_id, earliest_events=earliest_events, latest_events=latest_events, - min_depth=min_depth, limit=limit, ) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 45d955e6f5..cab57a8849 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -309,8 +309,8 @@ class FederationHandler(BaseHandler): if sent_to_us_directly: logger.warn( - "[%s %s] Failed to fetch %d prev events: rejecting", - room_id, event_id, len(prevs - seen), + "[%s %s] Rejecting: failed to fetch %d prev events: %s", + room_id, event_id, len(prevs - seen), shortstr(prevs - seen) ) raise FederationError( "ERROR", @@ -452,8 +452,8 @@ class FederationHandler(BaseHandler): latest |= seen logger.info( - "[%s %s]: Requesting %d prev_events: %s", - room_id, event_id, len(prevs - seen), shortstr(prevs - seen) + "[%s %s]: Requesting missing events between %s and %s", + room_id, event_id, shortstr(latest), event_id, ) # XXX: we set timeout to 10s to help workaround @@ -1852,7 +1852,7 @@ class FederationHandler(BaseHandler): @defer.inlineCallbacks def on_get_missing_events(self, origin, room_id, earliest_events, - latest_events, limit, min_depth): + latest_events, limit): in_room = yield 
self.auth.check_host_in_room( room_id, origin @@ -1861,14 +1861,12 @@ class FederationHandler(BaseHandler): raise AuthError(403, "Host not in room.") limit = min(limit, 20) - min_depth = max(min_depth, 0) missing_events = yield self.store.get_missing_events( room_id=room_id, earliest_events=earliest_events, latest_events=latest_events, limit=limit, - min_depth=min_depth, ) missing_events = yield filter_events_for_server( diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index 24345b20a6..3faca2a042 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -376,33 +376,25 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, @defer.inlineCallbacks def get_missing_events(self, room_id, earliest_events, latest_events, - limit, min_depth): + limit): ids = yield self.runInteraction( "get_missing_events", self._get_missing_events, - room_id, earliest_events, latest_events, limit, min_depth + room_id, earliest_events, latest_events, limit, ) - events = yield self._get_events(ids) - - events = sorted( - [ev for ev in events if ev.depth >= min_depth], - key=lambda e: e.depth, - ) - - defer.returnValue(events[:limit]) + defer.returnValue(events) def _get_missing_events(self, txn, room_id, earliest_events, latest_events, - limit, min_depth): - - earliest_events = set(earliest_events) - front = set(latest_events) - earliest_events + limit): - event_results = set() + seen_events = set(earliest_events) + front = set(latest_events) - seen_events + event_results = [] query = ( "SELECT prev_event_id FROM event_edges " - "WHERE event_id = ? AND is_state = ? " + "WHERE room_id = ? AND event_id = ? AND is_state = ? " "LIMIT ?" ) @@ -411,18 +403,20 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, for event_id in front: txn.execute( query, - (event_id, False, limit - len(event_results)) + (room_id, event_id, False, limit - len(event_results)) ) - for e_id, in txn: - new_front.add(e_id) + new_results = set(t[0] for t in txn) - seen_events - new_front -= earliest_events - new_front -= event_results + new_front |= new_results + seen_events |= new_results + event_results.extend(new_results) front = new_front - event_results |= new_front + # we built the list working backwards from latest_events; we now need to + # reverse it so that the events are approximately chronological. + event_results.reverse() return event_results -- cgit 1.4.1 From 52e3d3813baf82dc16aae42bdd108cf296c3ead7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 17 Oct 2018 17:40:01 +0100 Subject: prep changelog --- CHANGES.md | 25 +++++++++++++++++++++++++ changelog.d/4045.bugfix | 1 - changelog.d/4052.feature | 12 ------------ 3 files changed, 25 insertions(+), 13 deletions(-) delete mode 100644 changelog.d/4045.bugfix delete mode 100644 changelog.d/4052.feature diff --git a/CHANGES.md b/CHANGES.md index 6c80a86461..2598bfc333 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,28 @@ +Synapse 0.33.7rc2 (2018-10-17) +============================== + +**Warning**: This release removes the example email notification templates from `res/templates` (they are now internal to the python package). This should only affect you if you (a) deploy your Synapse instance from a git checkout or a github snapshot URL, and (b) have email notifications enabled. 
+ +If you have email notifications enabled, you should ensure that +`email.template_dir` is either configured to point at a directory where you +have installed customised templates, or leave it unset to use the default +templates. + +The configuration parser will try to detect the situation where +`email.template_dir` is incorrectly set to `res/templates` and do the right +thing, but will warn about this. + +Features +-------- + +- Ship the example email templates as part of the package ([\#4052](https://github.com/matrix-org/synapse/issues/4052)) + +Bugfixes +-------- + +- Fix bug which made get_missing_events return too few events ([\#4045](https://github.com/matrix-org/synapse/issues/4045)) + + Synapse 0.33.7rc1 (2018-10-15) ============================== diff --git a/changelog.d/4045.bugfix b/changelog.d/4045.bugfix deleted file mode 100644 index fa50eb5aff..0000000000 --- a/changelog.d/4045.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug which made get_missing_events return too few events \ No newline at end of file diff --git a/changelog.d/4052.feature b/changelog.d/4052.feature deleted file mode 100644 index 0c01f06880..0000000000 --- a/changelog.d/4052.feature +++ /dev/null @@ -1,12 +0,0 @@ -Ship the example email templates as part of the package - -**Note**: if you deploy your Synapse instance from a git checkout or a github -snapshot URL, then this means that the example email templates will no longer -be installed in `res/templates`. If you have email notifications enabled, you -should ensure that `email.template_dir` is either configured to point at a -directory where you have installed customised templates, or leave it unset to -use the default templates. - -The configuration parser will try to detect the situation where -`email.template_dir` is incorrectly set to `res/templates` and do the right -thing, but will warn about this. \ No newline at end of file -- cgit 1.4.1 From c7d0f34a3c0217be4ca06d934b66fcdfc22a7219 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 17 Oct 2018 17:40:19 +0100 Subject: v0.33.7rc2 --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index ea07fda14a..a0c4ef6488 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.33.7rc1" +__version__ = "0.33.7rc2" -- cgit 1.4.1 From 8c2b8d7f0bfb5043c5e22f09fa69f01b4656d3d2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 17 Oct 2018 17:42:12 +0100 Subject: fix changelog formatting --- CHANGES.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 2598bfc333..7de66d6ab4 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,7 +1,10 @@ Synapse 0.33.7rc2 (2018-10-17) ============================== -**Warning**: This release removes the example email notification templates from `res/templates` (they are now internal to the python package). This should only affect you if you (a) deploy your Synapse instance from a git checkout or a github snapshot URL, and (b) have email notifications enabled. +**Warning**: This release removes the example email notification templates from +`res/templates` (they are now internal to the python package). This should only +affect you if you (a) deploy your Synapse instance from a git checkout or a +github snapshot URL, and (b) have email notifications enabled. 
If you have email notifications enabled, you should ensure that `email.template_dir` is either configured to point at a directory where you -- cgit 1.4.1 From 926da4dda84178b83fa00813d31f1bceef25171f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 18 Oct 2018 14:57:32 +0100 Subject: 0.33.7 --- CHANGES.md | 7 +++++-- synapse/__init__.py | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 7de66d6ab4..5f598559a0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,5 @@ -Synapse 0.33.7rc2 (2018-10-17) -============================== +Synapse 0.33.7 (2018-10-18) +=========================== **Warning**: This release removes the example email notification templates from `res/templates` (they are now internal to the python package). This should only @@ -15,6 +15,9 @@ The configuration parser will try to detect the situation where `email.template_dir` is incorrectly set to `res/templates` and do the right thing, but will warn about this. +Synapse 0.33.7rc2 (2018-10-17) +============================== + Features -------- diff --git a/synapse/__init__.py b/synapse/__init__.py index a0c4ef6488..1ddbbbebfb 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.33.7rc2" +__version__ = "0.33.7" -- cgit 1.4.1 From 03287c350eec760b17a4739274d84ca8dd86143a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 18 Oct 2018 15:09:34 +0100 Subject: remove redundant changelog file this change has been released --- changelog.d/4045.bugfix | 1 - 1 file changed, 1 deletion(-) delete mode 100644 changelog.d/4045.bugfix diff --git a/changelog.d/4045.bugfix b/changelog.d/4045.bugfix deleted file mode 100644 index fa50eb5aff..0000000000 --- a/changelog.d/4045.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug which made get_missing_events return too few events \ No newline at end of file -- cgit 1.4.1 From c00f4d237b31cf4f7f330bbc65169a1fbfb7b5e7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 18 Oct 2018 17:17:39 +0100 Subject: Add warnings about the upgrade to 0.33.7 --- UPGRADE.rst | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/UPGRADE.rst b/UPGRADE.rst index 6cf3169f75..201d298123 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -48,6 +48,23 @@ returned by the Client-Server API: # configured on port 443. curl -kv https:///_matrix/client/versions 2>&1 | grep "Server:" +Upgrading to v0.33.7 +==================== + +This release removes the example email notification templates from +``res/templates`` (they are now internal to the python package). This should +only affect you if you (a) deploy your Synapse instance from a git checkout or +a github snapshot URL, and (b) have email notifications enabled. + +If you have email notifications enabled, you should ensure that +``email.template_dir`` is either configured to point at a directory where you +have installed customised templates, or leave it unset to use the default +templates. + +The configuration parser will try to detect the situation where +``email.template_dir`` is incorrectly set to ``res/templates`` and do the right +thing, but will warn about this. 
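For reference, the fallback this note describes comes down to the loader choice below (a minimal sketch assuming a `config` object with an optional `email_template_dir` attribute, mirroring the `mailer.py` hunk earlier in this series):

    import jinja2

    def build_template_env(config):
        # An explicitly configured template_dir wins; otherwise fall back
        # to the example templates shipped inside the synapse package.
        if config.email_template_dir:
            loader = jinja2.FileSystemLoader(config.email_template_dir)
        else:
            loader = jinja2.PackageLoader('synapse', 'res/templates')
        return jinja2.Environment(loader=loader)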
+ Upgrading to v0.27.3 ==================== -- cgit 1.4.1 From 88c5ffec336116d1d6eafd31b2c91a87d7d43ff7 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Thu, 18 Oct 2018 12:35:30 -0600 Subject: Test for terms UI auth --- tests/rest/client/v2_alpha/test_register.py | 70 +++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 1c128e81f5..a802e1a406 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -174,3 +174,73 @@ class RegisterRestServletTestCase(unittest.TestCase): self.assertEquals(channel.result["code"], b"403", channel.result) self.assertEquals(channel.json_body["error"], "Guest access is disabled") + + def test_POST_terms_auth(self): + self.hs.config.block_events_without_consent_error = True + self.hs.config.public_baseurl = "https://example.org" + self.hs.config.user_consent_version = "1.0" + + # Do a UI auth request + reqest, channel = make_request(b"POST", self.url, b"{}") + render(request, self.resource, self.clock) + + self.assertEquals(channel.result["code"], b"401", channel.result) + + self.assertIsInstance(channel.json_body["session"], str) + + self.assertIsInstance(channel.json_body["flows"], list) + for flow in channel.json_body["flows"]: + self.assertIsInstance(flow["stages"], list) + self.assertTrue(len(flow["stages"]) > 0) + self.assertEquals(flow["stages"][-1], "m.login.terms") + + expected_params = { + "m.login.terms": { + "policies": { + "privacy_policy": { + "en": { + "name": "Privacy Policy", + "url": "https://example.org/_matrix/consent", + }, + "version": "1.0" + }, + }, + }, + } + self.assertIsInstance(channel.json_body["params"], dict) + self.assertDictContainsSubset(channel.json_body["params"], expected_params) + + # Completing the stage should result in the stage being completed + + user_id = "@kermit:muppet" + token = "kermits_access_token" + device_id = "frogfone" + request_data = json.dumps( + { + "username": "kermit", + "password": "monkey", + "device_id": device_id, + "session": channel.json_body["session"], + } + ) + self.registration_handler.check_username = Mock(return_value=True) + self.auth_result = (None, {"username": "kermit", "password": "monkey"}, None) + self.registration_handler.register = Mock(return_value=(user_id, None)) + self.auth_handler.get_access_token_for_user_id = Mock(return_value=token) + self.device_handler.check_device_registered = Mock(return_value=device_id) + + + request, channel = make_request(b"POST", self.url, request_data) + render(request, self.resource, self.clock) + + det_data = { + "user_id": user_id, + "access_token": token, + "home_server": self.hs.hostname, + "device_id": device_id, + } + self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertDictContainsSubset(det_data, channel.json_body) + self.auth_handler.get_login_tuple_for_user_id( + user_id, device_id=device_id, initial_device_display_name=None + ) -- cgit 1.4.1 From dba84fa69c55c61d347169d5210bb63f65849fb5 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Thu, 18 Oct 2018 12:45:21 -0600 Subject: Fix terms UI auth test --- tests/rest/client/v2_alpha/test_register.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index a802e1a406..36eaabbad8 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py 
@@ -181,7 +181,7 @@ class RegisterRestServletTestCase(unittest.TestCase): self.hs.config.user_consent_version = "1.0" # Do a UI auth request - reqest, channel = make_request(b"POST", self.url, b"{}") + request, channel = make_request(b"POST", self.url, b"{}") render(request, self.resource, self.clock) self.assertEquals(channel.result["code"], b"401", channel.result) @@ -220,7 +220,10 @@ class RegisterRestServletTestCase(unittest.TestCase): "username": "kermit", "password": "monkey", "device_id": device_id, - "session": channel.json_body["session"], + "auth": { + "session": channel.json_body["session"], + "type": "m.login.terms", + }, } ) self.registration_handler.check_username = Mock(return_value=True) -- cgit 1.4.1 From c69026a7586fe51a5def39cf0a581869b1228419 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 18 Oct 2018 21:04:34 +0100 Subject: Use the right python when starting workers We should use the same python to start the workers as we do for the main synapse (ie, the same one used to run synctl.) --- changelog.d/4057.bugfix | 1 + synctl | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/4057.bugfix diff --git a/changelog.d/4057.bugfix b/changelog.d/4057.bugfix new file mode 100644 index 0000000000..7577731255 --- /dev/null +++ b/changelog.d/4057.bugfix @@ -0,0 +1 @@ +synctl will use the right python executable to run worker processes \ No newline at end of file diff --git a/synctl b/synctl index 4bd4c68b37..bb8cb084cc 100755 --- a/synctl +++ b/synctl @@ -1,6 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -86,7 +87,7 @@ def start(configfile): def start_worker(app, configfile, worker_configfile): args = [ - "python", "-B", + sys.executable, "-B", "-m", app, "-c", configfile, "-c", worker_configfile -- cgit 1.4.1 From a36b0ec195c4fe88730d656c8668a5dda1a6f1a7 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 19 Oct 2018 09:24:00 +1100 Subject: make a bytestring --- synapse/util/manhole.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/util/manhole.py b/synapse/util/manhole.py index 8d0f2a8918..cf43ab6a19 100644 --- a/synapse/util/manhole.py +++ b/synapse/util/manhole.py @@ -82,7 +82,7 @@ def manhole(username, password, globals): ) factory = manhole_ssh.ConchFactory(portal.Portal(rlm, [checker])) - factory.publicKeys['ssh-rsa'] = Key.fromString(PUBLIC_KEY) - factory.privateKeys['ssh-rsa'] = Key.fromString(PRIVATE_KEY) + factory.publicKeys[b'ssh-rsa'] = Key.fromString(PUBLIC_KEY) + factory.privateKeys[b'ssh-rsa'] = Key.fromString(PRIVATE_KEY) return factory -- cgit 1.4.1 From 1d17fc52aedc8f2b2a4c3cbbfb460dccb4b08b4c Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 19 Oct 2018 09:27:10 +1100 Subject: changelog --- changelog.d/4060.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4060.bugfix diff --git a/changelog.d/4060.bugfix b/changelog.d/4060.bugfix new file mode 100644 index 0000000000..78d69a8819 --- /dev/null +++ b/changelog.d/4060.bugfix @@ -0,0 +1 @@ +Manhole now works again on Python 3, instead of failing with a "couldn't match all kex parts" when connecting. 
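The failure fixed here comes from Twisted's SSH factory looking up host keys (and checking passwords) as byte strings on Python 3, so text-string keys are never matched. A rough sketch of the corrected setup, assuming PEM-encoded `public_key_pem`/`private_key_pem` strings and a prepared `realm` as stand-ins for the real module's constants:

    from twisted.conch import manhole_ssh
    from twisted.conch.ssh.keys import Key
    from twisted.cred import checkers, portal

    def build_manhole_factory(realm, username, password,
                              public_key_pem, private_key_pem):
        # The in-memory credentials checker wants bytes passwords on Python 3.
        if not isinstance(password, bytes):
            password = password.encode('ascii')

        checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(
            **{username: password}
        )
        factory = manhole_ssh.ConchFactory(portal.Portal(realm, [checker]))

        # The key algorithm names must be byte strings too; str keys are
        # silently ignored, producing "couldn't match all kex parts".
        factory.publicKeys[b'ssh-rsa'] = Key.fromString(public_key_pem)
        factory.privateKeys[b'ssh-rsa'] = Key.fromString(private_key_pem)
        return factory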
-- cgit 1.4.1 From 74e761708398d5170783912f02d120f20113205e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Oct 2018 16:14:24 +0100 Subject: Clean up room alias creation --- synapse/handlers/directory.py | 77 ++++++++++++++++++++++--------------- synapse/handlers/room.py | 5 ++- synapse/rest/client/v1/directory.py | 37 +++--------------- 3 files changed, 55 insertions(+), 64 deletions(-) diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 18741c5fac..02f12f6645 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -80,42 +80,60 @@ class DirectoryHandler(BaseHandler): ) @defer.inlineCallbacks - def create_association(self, user_id, room_alias, room_id, servers=None): - # association creation for human users - # TODO(erikj): Do user auth. + def create_association(self, requester, room_alias, room_id, servers=None, + send_event=True): + """Attempt to create a new alias - if not self.spam_checker.user_may_create_room_alias(user_id, room_alias): - raise SynapseError( - 403, "This user is not permitted to create this alias", - ) + Args: + requester (Requester) + room_alias (RoomAlias) + room_id (str) + servers (list[str]|None): List of servers that others servers + should try and join via + send_event (bool): Whether to send an updated m.room.aliases event - can_create = yield self.can_modify_alias( - room_alias, - user_id=user_id - ) - if not can_create: - raise SynapseError( - 400, "This alias is reserved by an application service.", - errcode=Codes.EXCLUSIVE - ) - yield self._create_association(room_alias, room_id, servers, creator=user_id) + Returns: + Deferred + """ - @defer.inlineCallbacks - def create_appservice_association(self, service, room_alias, room_id, - servers=None): - if not service.is_interested_in_alias(room_alias.to_string()): - raise SynapseError( - 400, "This application service has not reserved" - " this kind of alias.", errcode=Codes.EXCLUSIVE + user_id = requester.user.to_string() + + service = requester.app_service + if service: + if not service.is_interested_in_alias(room_alias.to_string()): + raise SynapseError( + 400, "This application service has not reserved" + " this kind of alias.", errcode=Codes.EXCLUSIVE + ) + else: + if not self.spam_checker.user_may_create_room_alias(user_id, room_alias): + raise AuthError( + 403, "This user is not permitted to create this alias", + ) + + can_create = yield self.can_modify_alias( + room_alias, + user_id=user_id ) + if not can_create: + raise AuthError( + 400, "This alias is reserved by an application service.", + errcode=Codes.EXCLUSIVE + ) - # association creation for app services - yield self._create_association(room_alias, room_id, servers) + yield self._create_association(room_alias, room_id, servers, creator=user_id) + if send_event: + yield self.send_room_alias_update_event( + requester, + room_id + ) @defer.inlineCallbacks - def delete_association(self, requester, user_id, room_alias): + def delete_association(self, requester, room_alias): # association deletion for human users + user_id = requester.user.to_string() + try: can_delete = yield self._user_can_delete_alias(room_alias, user_id) except StoreError as e: @@ -143,7 +161,6 @@ class DirectoryHandler(BaseHandler): try: yield self.send_room_alias_update_event( requester, - requester.user.to_string(), room_id ) @@ -261,7 +278,7 @@ class DirectoryHandler(BaseHandler): ) @defer.inlineCallbacks - def send_room_alias_update_event(self, requester, user_id, room_id): + def 
send_room_alias_update_event(self, requester, room_id): aliases = yield self.store.get_aliases_for_room(room_id) yield self.event_creation_handler.create_and_send_nonmember_event( @@ -270,7 +287,7 @@ class DirectoryHandler(BaseHandler): "type": EventTypes.Aliases, "state_key": self.hs.hostname, "room_id": room_id, - "sender": user_id, + "sender": requester.user.to_string(), "content": {"aliases": aliases}, }, ratelimit=False diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index c3f820b975..ab1571b27b 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -190,10 +190,11 @@ class RoomCreationHandler(BaseHandler): if room_alias: directory_handler = self.hs.get_handlers().directory_handler yield directory_handler.create_association( - user_id=user_id, + requester=requester, room_id=room_id, room_alias=room_alias, servers=[self.hs.hostname], + send_event=False, ) preset_config = config.get( @@ -289,7 +290,7 @@ class RoomCreationHandler(BaseHandler): if room_alias: result["room_alias"] = room_alias.to_string() yield directory_handler.send_room_alias_update_event( - requester, user_id, room_id + requester, room_id ) defer.returnValue(result) diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index 97733f3026..0220acf644 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -74,38 +74,11 @@ class ClientDirectoryServer(ClientV1RestServlet): if room is None: raise SynapseError(400, "Room does not exist") - dir_handler = self.handlers.directory_handler + requester = yield self.auth.get_user_by_req(request) - try: - # try to auth as a user - requester = yield self.auth.get_user_by_req(request) - try: - user_id = requester.user.to_string() - yield dir_handler.create_association( - user_id, room_alias, room_id, servers - ) - yield dir_handler.send_room_alias_update_event( - requester, - user_id, - room_id - ) - except SynapseError as e: - raise e - except Exception: - logger.exception("Failed to create association") - raise - except AuthError: - # try to auth as an application service - service = yield self.auth.get_appservice_by_req(request) - yield dir_handler.create_appservice_association( - service, room_alias, room_id, servers - ) - logger.info( - "Application service at %s created alias %s pointing to %s", - service.url, - room_alias.to_string(), - room_id - ) + yield self.handlers.directory_handler.create_association( + requester, room_alias, room_id, servers + ) defer.returnValue((200, {})) @@ -135,7 +108,7 @@ class ClientDirectoryServer(ClientV1RestServlet): room_alias = RoomAlias.from_string(room_alias) yield dir_handler.delete_association( - requester, user.to_string(), room_alias + requester, room_alias ) logger.info( -- cgit 1.4.1 From 0d31109ed58d39a0234471c9067115a3652cc1e3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 19 Oct 2018 10:14:29 +0100 Subject: Newsfile --- changelog.d/4063.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4063.misc diff --git a/changelog.d/4063.misc b/changelog.d/4063.misc new file mode 100644 index 0000000000..677fcb90ad --- /dev/null +++ b/changelog.d/4063.misc @@ -0,0 +1 @@ +Refactor room alias creation code -- cgit 1.4.1 From 084046456ec88588779a62f9378c1a8e911bfc7c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 17 Oct 2018 16:14:04 +0100 Subject: Add config option to control alias creation --- synapse/config/homeserver.py | 3 +- synapse/config/room_directory.py | 101 ++++++++++++++++++++++++++++++++ 
synapse/federation/federation_server.py | 16 +---- synapse/handlers/directory.py | 9 +++ synapse/util/__init__.py | 21 +++++++ 5 files changed, 135 insertions(+), 15 deletions(-) create mode 100644 synapse/config/room_directory.py diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index b8d5690f2b..10dd40159f 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -31,6 +31,7 @@ from .push import PushConfig from .ratelimiting import RatelimitConfig from .registration import RegistrationConfig from .repository import ContentRepositoryConfig +from .room_directory import RoomDirectoryConfig from .saml2 import SAML2Config from .server import ServerConfig from .server_notices_config import ServerNoticesConfig @@ -49,7 +50,7 @@ class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig, WorkerConfig, PasswordAuthProviderConfig, PushConfig, SpamCheckerConfig, GroupsConfig, UserDirectoryConfig, ConsentConfig, - ServerNoticesConfig, + ServerNoticesConfig, RoomDirectoryConfig, ): pass diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py new file mode 100644 index 0000000000..41ef3217e8 --- /dev/null +++ b/synapse/config/room_directory.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.util import glob_to_regex + +from ._base import Config, ConfigError + + +class RoomDirectoryConfig(Config): + def read_config(self, config): + alias_creation_rules = config["alias_creation_rules"] + + self._alias_creation_rules = [ + _AliasRule(rule) + for rule in alias_creation_rules + ] + + def default_config(self, config_dir_path, server_name, **kwargs): + return """ + # The `alias_creation` option controls who's allowed to create aliases + # on this server. + # + # The format of this option is a list of rules that contain globs that + # match against user_id and the new alias (fully qualified with server + # name). The action in the first rule that matches is taken, which can + # currently either be "allowed" or "denied". + # + # If no rules match the request is denied. 
+ alias_creation_rules: + - user_id: "*" + alias: "*" + action: allowed + """ + + def is_alias_creation_allowed(self, user_id, alias): + """Checks if the given user is allowed to create the given alias + + Args: + user_id (str) + alias (str) + + Returns: + boolean: True if user is allowed to crate the alias + """ + for rule in self._alias_creation_rules: + if rule.matches(user_id, alias): + return rule.action == "allowed" + + return False + + +class _AliasRule(object): + def __init__(self, rule): + action = rule["action"] + user_id = rule["user_id"] + alias = rule["alias"] + + if action in ("allowed", "denied"): + self.action = action + else: + raise ConfigError( + "alias_creation_rules rules can only have action of 'allowed'" + " or 'denied'" + ) + + try: + self._user_id_regex = glob_to_regex(user_id) + self._alias_regex = glob_to_regex(alias) + except Exception as e: + raise ConfigError("Failed to parse glob into regex: %s", e) + + def matches(self, user_id, alias): + """Tests if this rule matches the given user_id and alias. + + Args: + user_id (str) + alias (str) + + Returns: + boolean + """ + + if not self._user_id_regex.search(user_id): + return False + + if not self._alias_regex.search(alias): + return False + + return True diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 4efe95faa4..d041c26824 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -import re import six from six import iteritems @@ -44,6 +43,7 @@ from synapse.replication.http.federation import ( ReplicationGetQueryRestServlet, ) from synapse.types import get_domain_from_id +from synapse.util import glob_to_regex from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.caches.response_cache import ResponseCache from synapse.util.logcontext import nested_logging_context @@ -729,22 +729,10 @@ def _acl_entry_matches(server_name, acl_entry): if not isinstance(acl_entry, six.string_types): logger.warn("Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry)) return False - regex = _glob_to_regex(acl_entry) + regex = glob_to_regex(acl_entry) return regex.match(server_name) -def _glob_to_regex(glob): - res = '' - for c in glob: - if c == '*': - res = res + '.*' - elif c == '?': - res = res + '.' - else: - res = res + re.escape(c) - return re.compile(res + "\\Z", re.IGNORECASE) - - class FederationHandlerRegistry(object): """Allows classes to register themselves as handlers for a given EDU or query type for incoming federation traffic. diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 02f12f6645..7d67bf803a 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -43,6 +43,7 @@ class DirectoryHandler(BaseHandler): self.state = hs.get_state_handler() self.appservice_handler = hs.get_application_service_handler() self.event_creation_handler = hs.get_event_creation_handler() + self.config = hs.config self.federation = hs.get_federation_client() hs.get_federation_registry().register_query_handler( @@ -111,6 +112,14 @@ class DirectoryHandler(BaseHandler): 403, "This user is not permitted to create this alias", ) + if not self.config.is_alias_creation_allowed(user_id, room_alias.to_string()): + # Lets just return a generic message, as there may be all sorts of + # reasons why we said no. 
TODO: Allow configurable error messages + # per alias creation rule? + raise SynapseError( + 403, "Not allowed to create alias", + ) + can_create = yield self.can_modify_alias( room_alias, user_id=user_id diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 9a8fae0497..163e4b35ff 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -14,6 +14,7 @@ # limitations under the License. import logging +import re from itertools import islice import attr @@ -138,3 +139,23 @@ def log_failure(failure, msg, consumeErrors=True): if not consumeErrors: return failure + + +def glob_to_regex(glob): + """Converts a glob to a compiled regex object + + Args: + glob (str) + + Returns: + re.RegexObject + """ + res = '' + for c in glob: + if c == '*': + res = res + '.*' + elif c == '?': + res = res + '.' + else: + res = res + re.escape(c) + return re.compile(res + "\\Z", re.IGNORECASE) -- cgit 1.4.1 From f9d6c677eac35c926339a904b1f0c8c9dbd9049a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 17 Oct 2018 16:36:39 +0100 Subject: Newsfile --- changelog.d/4051.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4051.feature diff --git a/changelog.d/4051.feature b/changelog.d/4051.feature new file mode 100644 index 0000000000..9c1b3a72a0 --- /dev/null +++ b/changelog.d/4051.feature @@ -0,0 +1 @@ +Add config option to control alias creation -- cgit 1.4.1 From 9fafdfa97d87006177d13d4b80aeebfc4ded4bee Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Oct 2018 14:21:09 +0100 Subject: Anchor returned regex to start and end of string --- synapse/util/__init__.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 163e4b35ff..0ae7e2ef3b 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -142,7 +142,9 @@ def log_failure(failure, msg, consumeErrors=True): def glob_to_regex(glob): - """Converts a glob to a compiled regex object + """Converts a glob to a compiled regex object. + + The regex is anchored at the beginning and end of the string. Args: glob (str) @@ -158,4 +160,6 @@ def glob_to_regex(glob): res = res + '.' else: res = res + re.escape(c) - return re.compile(res + "\\Z", re.IGNORECASE) + + # \A anchors at start of string, \Z at end of string + return re.compile(r"\A" + res + r"\Z", re.IGNORECASE) -- cgit 1.4.1 From 1b4bf232b9fd348a94b8bc4e9c851ed5b6d8e801 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Oct 2018 16:04:50 +0100 Subject: Add tests for config generation --- tests/config/test_room_directory.py | 67 +++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 tests/config/test_room_directory.py diff --git a/tests/config/test_room_directory.py b/tests/config/test_room_directory.py new file mode 100644 index 0000000000..75021a5f04 --- /dev/null +++ b/tests/config/test_room_directory.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import yaml + +from synapse.config.room_directory import RoomDirectoryConfig + +from tests import unittest + + +class RoomDirectoryConfigTestCase(unittest.TestCase): + def test_alias_creation_acl(self): + config = yaml.load(""" + alias_creation_rules: + - user_id: "*bob*" + alias: "*" + action: "denied" + - user_id: "*" + alias: "#unofficial_*" + action: "allowed" + - user_id: "@foo*:example.com" + alias: "*" + action: "allowed" + - user_id: "@gah:example.com" + alias: "#goo:example.com" + action: "allowed" + """) + + rd_config = RoomDirectoryConfig() + rd_config.read_config(config) + + self.assertFalse(rd_config.is_alias_creation_allowed( + user_id="@bob:example.com", + alias="#test:example.com", + )) + + self.assertTrue(rd_config.is_alias_creation_allowed( + user_id="@test:example.com", + alias="#unofficial_st:example.com", + )) + + self.assertTrue(rd_config.is_alias_creation_allowed( + user_id="@foobar:example.com", + alias="#test:example.com", + )) + + self.assertTrue(rd_config.is_alias_creation_allowed( + user_id="@gah:example.com", + alias="#goo:example.com", + )) + + self.assertFalse(rd_config.is_alias_creation_allowed( + user_id="@test:example.com", + alias="#test:example.com", + )) -- cgit 1.4.1 From 3c580c2b47fab5f85c76a7061065e11b7d0eaeb8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Oct 2018 16:14:41 +0100 Subject: Add tests for alias creation rules --- tests/handlers/test_directory.py | 48 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index ec7355688b..4f299b74ba 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -18,7 +18,9 @@ from mock import Mock from twisted.internet import defer +from synapse.config.room_directory import RoomDirectoryConfig from synapse.handlers.directory import DirectoryHandler +from synapse.rest.client.v1 import directory, room from synapse.types import RoomAlias from tests import unittest @@ -102,3 +104,49 @@ class DirectoryTestCase(unittest.TestCase): ) self.assertEquals({"room_id": "!8765asdf:test", "servers": ["test"]}, response) + + +class TestCreateAliasACL(unittest.HomeserverTestCase): + user_id = "@test:test" + + servlets = [directory.register_servlets, room.register_servlets] + + def prepare(self, hs, reactor, clock): + # We cheekily override the config to add custom alias creation rules + config = {} + config["alias_creation_rules"] = [ + { + "user_id": "*", + "alias": "#unofficial_*", + "action": "allowed", + } + ] + + rd_config = RoomDirectoryConfig() + rd_config.read_config(config) + + self.hs.config.is_alias_creation_allowed = rd_config.is_alias_creation_allowed + + return hs + + def test_denied(self): + room_id = self.helper.create_room_as(self.user_id) + + request, channel = self.make_request( + "PUT", + b"directory/room/%23test%3Atest", + ('{"room_id":"%s"}' % (room_id,)).encode('ascii'), + ) + self.render(request) + self.assertEquals(403, channel.code, channel.result) + + def test_allowed(self): + room_id = self.helper.create_room_as(self.user_id) + + request, channel = self.make_request( + "PUT", + b"directory/room/%23unofficial_test%3Atest", + ('{"room_id":"%s"}' % (room_id,)).encode('ascii'), + ) + self.render(request) + self.assertEquals(200, channel.code, channel.result) -- cgit 1.4.1 From b69216f7680b92ccd8f92e618e1a81c9ba3c3346 Mon Sep 17 00:00:00 2001 From: Amber 
Brown Date: Fri, 19 Oct 2018 21:45:45 +1100 Subject: Make the metrics less racy (#4061) --- changelog.d/4061.bugfix | 1 + synapse/http/request_metrics.py | 31 ++++++++++++++++++------------- synapse/notifier.py | 6 +++--- 3 files changed, 22 insertions(+), 16 deletions(-) create mode 100644 changelog.d/4061.bugfix diff --git a/changelog.d/4061.bugfix b/changelog.d/4061.bugfix new file mode 100644 index 0000000000..94ffcf7a51 --- /dev/null +++ b/changelog.d/4061.bugfix @@ -0,0 +1 @@ +Fix some metrics being racy and causing exceptions when polled by Prometheus. diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index fedb4e6b18..62045a918b 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -39,7 +39,8 @@ outgoing_responses_counter = Counter( ) response_timer = Histogram( - "synapse_http_server_response_time_seconds", "sec", + "synapse_http_server_response_time_seconds", + "sec", ["method", "servlet", "tag", "code"], ) @@ -79,15 +80,11 @@ response_size = Counter( # than when the response was written. in_flight_requests_ru_utime = Counter( - "synapse_http_server_in_flight_requests_ru_utime_seconds", - "", - ["method", "servlet"], + "synapse_http_server_in_flight_requests_ru_utime_seconds", "", ["method", "servlet"] ) in_flight_requests_ru_stime = Counter( - "synapse_http_server_in_flight_requests_ru_stime_seconds", - "", - ["method", "servlet"], + "synapse_http_server_in_flight_requests_ru_stime_seconds", "", ["method", "servlet"] ) in_flight_requests_db_txn_count = Counter( @@ -134,7 +131,7 @@ def _get_in_flight_counts(): # type counts = {} for rm in reqs: - key = (rm.method, rm.name,) + key = (rm.method, rm.name) counts[key] = counts.get(key, 0) + 1 return counts @@ -175,7 +172,8 @@ class RequestMetrics(object): if context != self.start_context: logger.warn( "Context have unexpectedly changed %r, %r", - context, self.start_context + context, + self.start_context, ) return @@ -192,10 +190,10 @@ class RequestMetrics(object): resource_usage = context.get_resource_usage() response_ru_utime.labels(self.method, self.name, tag).inc( - resource_usage.ru_utime, + resource_usage.ru_utime ) response_ru_stime.labels(self.method, self.name, tag).inc( - resource_usage.ru_stime, + resource_usage.ru_stime ) response_db_txn_count.labels(self.method, self.name, tag).inc( resource_usage.db_txn_count @@ -222,8 +220,15 @@ class RequestMetrics(object): diff = new_stats - self._request_stats self._request_stats = new_stats - in_flight_requests_ru_utime.labels(self.method, self.name).inc(diff.ru_utime) - in_flight_requests_ru_stime.labels(self.method, self.name).inc(diff.ru_stime) + # max() is used since rapid use of ru_stime/ru_utime can end up with the + # count going backwards due to NTP, time smearing, fine-grained + # correction, or floating points. Who knows, really? 
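As a worked illustration of why the clamp in this hunk is needed (the numbers below are invented):

    before = 12.507  # ru_utime total at the previous poll
    after = 12.503   # total at this poll, after a fine-grained clock correction
    delta = after - before        # -0.004: would try to move a Counter backwards
    safe_delta = max(delta, 0)    # 0.0: Prometheus counters must be monotonic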
+ in_flight_requests_ru_utime.labels(self.method, self.name).inc( + max(diff.ru_utime, 0) + ) + in_flight_requests_ru_stime.labels(self.method, self.name).inc( + max(diff.ru_stime, 0) + ) in_flight_requests_db_txn_count.labels(self.method, self.name).inc( diff.db_txn_count diff --git a/synapse/notifier.py b/synapse/notifier.py index 340b16ce25..de02b1017e 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -186,9 +186,9 @@ class Notifier(object): def count_listeners(): all_user_streams = set() - for x in self.room_to_user_streams.values(): + for x in list(self.room_to_user_streams.values()): all_user_streams |= x - for x in self.user_to_user_stream.values(): + for x in list(self.user_to_user_stream.values()): all_user_streams.add(x) return sum(stream.count_listeners() for stream in all_user_streams) @@ -196,7 +196,7 @@ class Notifier(object): LaterGauge( "synapse_notifier_rooms", "", [], - lambda: count(bool, self.room_to_user_streams.values()), + lambda: count(bool, list(self.room_to_user_streams.values())), ) LaterGauge( "synapse_notifier_users", "", [], -- cgit 1.4.1 From e404ba9aacb426efc7d815cadfabd54dcf576e3b Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 19 Oct 2018 22:26:00 +1100 Subject: Fix manhole on py3 (pt 2) (#4067) --- changelog.d/4067.bugfix | 1 + synapse/util/manhole.py | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/4067.bugfix diff --git a/changelog.d/4067.bugfix b/changelog.d/4067.bugfix new file mode 100644 index 0000000000..78d69a8819 --- /dev/null +++ b/changelog.d/4067.bugfix @@ -0,0 +1 @@ +Manhole now works again on Python 3, instead of failing with a "couldn't match all kex parts" when connecting. diff --git a/synapse/util/manhole.py b/synapse/util/manhole.py index cf43ab6a19..9cb7e9c9ab 100644 --- a/synapse/util/manhole.py +++ b/synapse/util/manhole.py @@ -70,6 +70,8 @@ def manhole(username, password, globals): Returns: twisted.internet.protocol.Factory: A factory to pass to ``listenTCP`` """ + if not isinstance(password, bytes): + password = password.encode('ascii') checker = checkers.InMemoryUsernamePasswordDatabaseDontUse( **{username: password} -- cgit 1.4.1 From cc325c7069599ac0f9b8c31aa3a472d5ea041a4f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 19 Oct 2018 14:01:59 +0100 Subject: Calculate absolute path for email templates --- changelog.d/4068.bugfix | 1 + synapse/config/emailconfig.py | 40 ++++++++++++++++++---------------------- synapse/push/mailer.py | 8 ++------ 3 files changed, 21 insertions(+), 28 deletions(-) create mode 100644 changelog.d/4068.bugfix diff --git a/changelog.d/4068.bugfix b/changelog.d/4068.bugfix new file mode 100644 index 0000000000..74bda7491f --- /dev/null +++ b/changelog.d/4068.bugfix @@ -0,0 +1 @@ +Fix bug which prevented email notifications from being sent unless an absolute path was given for `email_templates`. \ No newline at end of file diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index e2582cfecc..93d70cff14 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -19,18 +19,12 @@ from __future__ import print_function import email.utils import logging import os -import sys -import textwrap -from ._base import Config +import pkg_resources -logger = logging.getLogger(__name__) +from ._base import Config, ConfigError -TEMPLATE_DIR_WARNING = """\ -WARNING: The email notifier is configured to look for templates in '%(template_dir)s', -but no templates could be found there. 
We will fall back to using the example templates; -to get rid of this warning, leave 'email.template_dir' unset. -""" +logger = logging.getLogger(__name__) class EmailConfig(Config): @@ -78,20 +72,22 @@ class EmailConfig(Config): self.email_notif_template_html = email_config["notif_template_html"] self.email_notif_template_text = email_config["notif_template_text"] - self.email_template_dir = email_config.get("template_dir") - - # backwards-compatibility hack - if ( - self.email_template_dir == "res/templates" - and not os.path.isfile( - os.path.join(self.email_template_dir, self.email_notif_template_text) + template_dir = email_config.get("template_dir") + # we need an absolute path, because we change directory after starting (and + # we don't yet know what auxilliary templates like mail.css we will need). + # (Note that loading as package_resources with jinja.PackageLoader doesn't + # work for the same reason.) + if not template_dir: + template_dir = pkg_resources.resource_filename( + 'synapse', 'res/templates' ) - ): - t = TEMPLATE_DIR_WARNING % { - "template_dir": self.email_template_dir, - } - print(textwrap.fill(t, width=80) + "\n", file=sys.stderr) - self.email_template_dir = None + template_dir = os.path.abspath(template_dir) + + for f in self.email_notif_template_text, self.email_notif_template_html: + p = os.path.join(template_dir, f) + if not os.path.isfile(p): + raise ConfigError("Unable to find email template file %s" % (p, )) + self.email_template_dir = template_dir self.email_notif_for_new_users = email_config.get( "notif_for_new_users", True diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index b9dcfee740..16fb5e8471 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -526,12 +526,8 @@ def load_jinja2_templates(config): Returns: (notif_template_html, notif_template_text) """ - logger.info("loading jinja2") - - if config.email_template_dir: - loader = jinja2.FileSystemLoader(config.email_template_dir) - else: - loader = jinja2.PackageLoader('synapse', 'res/templates') + logger.info("loading email templates from '%s'", config.email_template_dir) + loader = jinja2.FileSystemLoader(config.email_template_dir) env = jinja2.Environment(loader=loader) env.filters["format_ts"] = format_ts_filter env.filters["mxc_to_http"] = _create_mxc_to_http_filter(config) -- cgit 1.4.1 From 47a9da28caa6a4f27d2df31f043971a2c9c7b555 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 12 Oct 2018 20:43:18 +0100 Subject: Batch process handling state groups --- synapse/storage/events.py | 81 +++++++++------------------------ synapse/storage/state.py | 112 +++++++++++++++++++++++++++++----------------- 2 files changed, 92 insertions(+), 101 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 0fb190530a..e4d0f8b1a9 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -37,6 +37,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.background_updates import BackgroundUpdateStore from synapse.storage.event_federation import EventFederationStore from synapse.storage.events_worker import EventsWorkerStore +from synapse.storage.state import StateGroupWorkerStore from synapse.types import RoomStreamToken, get_domain_from_id from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.descriptors import cached, cachedInlineCallbacks @@ -203,7 +204,8 @@ def _retry_on_integrity_error(func): # inherits from EventFederationStore so that we can call 
_update_backward_extremities # and _handle_mult_prev_events (though arguably those could both be moved in here) -class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore): +class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore, + BackgroundUpdateStore): EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" @@ -1995,70 +1997,29 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore logger.info("[purge] finding redundant state groups") - # Get all state groups that are only referenced by events that are - # to be deleted. - # This works by first getting state groups that we may want to delete, - # joining against event_to_state_groups to get events that use that - # state group, then left joining against events_to_purge again. Any - # state group where the left join produce *no nulls* are referenced - # only by events that are going to be purged. + # Get all state groups that are referenced by events that are to be + # deleted. We then go and check if they are referenced by other events + # or state groups, and if not we delete them. txn.execute(""" - SELECT state_group FROM - ( - SELECT DISTINCT state_group FROM events_to_purge - INNER JOIN event_to_state_groups USING (event_id) - ) AS sp - INNER JOIN event_to_state_groups USING (state_group) - LEFT JOIN events_to_purge AS ep USING (event_id) - GROUP BY state_group - HAVING SUM(CASE WHEN ep.event_id IS NULL THEN 1 ELSE 0 END) = 0 + SELECT DISTINCT state_group FROM events_to_purge + INNER JOIN event_to_state_groups USING (event_id) """) - state_rows = txn.fetchall() - logger.info("[purge] found %i redundant state groups", len(state_rows)) - - # make a set of the redundant state groups, so that we can look them up - # efficiently - state_groups_to_delete = set([sg for sg, in state_rows]) - - # Now we get all the state groups that rely on these state groups - logger.info("[purge] finding state groups which depend on redundant" - " state groups") - remaining_state_groups = [] - unreferenced_state_groups = 0 - for i in range(0, len(state_rows), 100): - chunk = [sg for sg, in state_rows[i:i + 100]] - # look for state groups whose prev_state_group is one we are about - # to delete - rows = self._simple_select_many_txn( - txn, - table="state_group_edges", - column="prev_state_group", - iterable=chunk, - retcols=["state_group"], - keyvalues={}, - ) - - for row in rows: - sg = row["state_group"] - - if sg in state_groups_to_delete: - # exclude state groups we are about to delete: no point in - # updating them - continue + referenced_state_groups = set(sg for sg, in txn) + logger.info( + "[purge] found %i referenced state groups", + len(referenced_state_groups), + ) - if not self._is_state_group_referenced(txn, sg): - # Let's also delete unreferenced state groups while we're - # here, since otherwise we'd need to de-delta them - state_groups_to_delete.add(sg) - unreferenced_state_groups += 1 - continue + logger.info("[purge] finding state groups that can be deleted") - remaining_state_groups.append(sg) + state_groups_to_delete, remaining_state_groups = self._find_unreferenced_groups( + txn, referenced_state_groups, + ) logger.info( - "[purge] found %i extra unreferenced state groups to delete", - unreferenced_state_groups, + "[purge] found %i state groups to delete", + len(state_groups_to_delete), ) logger.info( @@ -2109,11 +2070,11 @@ class EventsStore(EventFederationStore, EventsWorkerStore, 
BackgroundUpdateStore
        logger.info("[purge] removing redundant state groups")
        txn.executemany(
            "DELETE FROM state_groups_state WHERE state_group = ?",
-            state_rows
+            ((sg,) for sg in state_groups_to_delete),
        )
        txn.executemany(
            "DELETE FROM state_groups WHERE id = ?",
-            state_rows
+            ((sg,) for sg in state_groups_to_delete),
        )

        logger.info("[purge] removing events from event_to_state_groups")
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index f7cf5c86c9..0f86311ed4 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -1041,55 +1041,85 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):

         return count

-    def _is_state_group_referenced(self, txn, state_group):
-        """Checks if a given state group is referenced, or is safe to delete.
+    def _find_unreferenced_groups(self, txn, state_groups):
+        """Used when purging history to figure out which state groups can be
+        deleted and which need to be de-delta'ed (due to one of their prev groups
+        being scheduled for deletion).

-        A state group is referenced if it or any of its descendants are
-        pointed at by an event. (A descendant is a state_group whose chain of
-        prev_groups includes the given state_group.)
-        """
-
-        # We check this by doing a depth first search to look for any
-        # descendant referenced by `event_to_state_groups`.
-
-        # State groups we need to check, contains state groups that are
-        # descendants of `state_group`
-        state_groups_to_search = [state_group]
-
-        # Set of state groups we've already checked
-        state_groups_searched = set()
-
-        while state_groups_to_search:
-            state_group = state_groups_to_search.pop()  # Next state group to check
+        Args:
+            txn
+            state_groups (set[int]): Set of state groups referenced by events
+                that are going to be deleted.

-            is_referenced = self._simple_select_one_onecol_txn(
+        Returns:
+            tuple[set[int], set[int]]: The set of state groups that can be
+            deleted and the set of state groups that need to be de-delta'ed
+        """
+        # Graph of state group -> previous group
+        graph = {}
+
+        # Set of state groups that we have found to be referenced by events
+        referenced_groups = set()
+
+        # Set of state groups we've already seen
+        state_groups_seen = set(state_groups)
+
+        # Set of state groups to handle next.
+        next_to_search = set(state_groups)
+        while next_to_search:
+            # We bound size of groups we're looking up at once, to stop the
+            # SQL query getting too big
+            if len(next_to_search) < 100:
+                current_search = next_to_search
+                next_to_search = set()
+            else:
+                lst = list(next_to_search)
+                current_search = set(lst[:100])
+                next_to_search = set(lst[100:])
+
+            # Check if state groups are referenced
+            sql = """
+                SELECT state_group, count(*) FROM event_to_state_groups
+                LEFT JOIN events_to_purge AS ep USING (event_id)
+                WHERE state_group IN (%s) AND ep.event_id IS NULL
+                GROUP BY state_group
+            """ % (",".join("?" for _ in current_search),)
+            txn.execute(sql, list(current_search))
+
+            referenced = set(sg for sg, cnt in txn if cnt > 0)
+            referenced_groups |= referenced
+
+            # We don't continue iterating up the state group graphs for state
+            # groups that are referenced.
+ current_search -= referenced + + rows = self._simple_select_many_txn( txn, - table="event_to_state_groups", - keyvalues={"state_group": state_group}, - retcol="event_id", - allow_none=True, + table="state_group_edges", + column="prev_state_group", + iterable=current_search, + keyvalues={}, + retcols=("prev_state_group", "state_group",), ) - if is_referenced: - # A descendant is referenced by event_to_state_groups, so - # original state group is referenced. - return True - state_groups_searched.add(state_group) + next_to_search.update(row["state_group"] for row in rows) + # We don't bother re-handling groups we've already seen + next_to_search -= state_groups_seen + state_groups_seen |= next_to_search - # Find all children of current state group and add to search - references = self._simple_select_onecol_txn( - txn, - table="state_group_edges", - keyvalues={"prev_state_group": state_group}, - retcol="state_group", - ) - state_groups_to_search.extend(references) + for row in rows: + # Note: Each state group can have at most one prev group + graph[row["state_group"]] = row["prev_state_group"] + + to_delete = state_groups_seen - referenced_groups - # Lets be paranoid and check for cycles - if state_groups_searched.intersection(references): - raise Exception("State group %s has cyclic dependency", state_group) + to_dedelta = set() + for sg in referenced_groups: + prev_sg = graph.get(sg) + if prev_sg and prev_sg in to_delete: + to_dedelta.add(sg) - return False + return to_delete, to_dedelta class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): -- cgit 1.4.1 From 67f7b9cb50c246226b94027e3c1d4b958ff9f840 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 19 Oct 2018 16:06:59 +0100 Subject: pep8 --- synapse/storage/events.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index af822fb69d..379b5c514f 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2041,7 +2041,7 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore INNER JOIN event_to_state_groups USING (event_id) """) - referenced_state_groups = set(sg for sg, in txn) + referenced_state_groups = set(sg for sg, in txn) logger.info( "[purge] found %i referenced state groups", len(referenced_state_groups), -- cgit 1.4.1 From eba48c0f16d08d5806da73d0b51d62de6aaee118 Mon Sep 17 00:00:00 2001 From: steamport Date: Fri, 19 Oct 2018 19:58:28 +0000 Subject: Add Caddy example to README --- README.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.rst b/README.rst index e1ea351f84..de133f674d 100644 --- a/README.rst +++ b/README.rst @@ -652,6 +652,7 @@ Using a reverse proxy with Synapse It is recommended to put a reverse proxy such as `nginx `_, `Apache `_ or +`Caddy `_ or `HAProxy `_ in front of Synapse. One advantage of doing so is that it means that you can expose the default https port (443) to Matrix clients without needing to run Synapse with root privileges. @@ -682,6 +683,11 @@ so an example nginx configuration might look like:: } } +an example caddy configuration may look like:: + proxy /_matrix http://localhost:8008 { + transparent + } + and an example apache configuration may look like:: -- cgit 1.4.1 From 593389a077e41a5db78d4f55210909261b8e19e4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 19 Oct 2018 21:35:57 +0100 Subject: Remove notes on fallback for email_templates This fallback didn't work, and was removed in #4069. 
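An aside on the "Batch process handling state groups" commit above: the new `_find_unreferenced_groups` walks outwards from the state groups touched by the purge, in batches of at most 100, and partitions everything it finds into groups that are safe to delete and referenced groups that must be de-delta'ed first. A minimal, database-free sketch of that traversal, assuming a hypothetical `children` map standing in for the `state_group_edges` table and a precomputed `referenced` set standing in for the `event_to_state_groups` query, might look like::

    # Sketch only: `children` and `referenced` are illustrative stand-ins for
    # the state_group_edges table and the event_to_state_groups lookup.
    def find_unreferenced_groups(purged_groups, children, referenced):
        graph = {}                 # child group -> its single prev group
        referenced_groups = set()  # groups that live events still point at
        seen = set(purged_groups)
        next_to_search = set(purged_groups)

        while next_to_search:
            # Bound the batch size, as the real code does to keep the SQL small.
            batch = set(list(next_to_search)[:100])
            next_to_search -= batch

            still_used = batch & referenced
            referenced_groups |= still_used
            batch -= still_used    # don't walk past groups we have to keep

            for sg in batch:
                for child in children.get(sg, ()):
                    graph[child] = sg
                    if child not in seen:
                        seen.add(child)
                        next_to_search.add(child)

        to_delete = seen - referenced_groups
        # A kept group whose prev group is going away must be de-delta'ed.
        to_dedelta = set(sg for sg in referenced_groups if graph.get(sg) in to_delete)
        return to_delete, to_dedelta

For a delta chain 1 -> 2 -> 3 where only group 3 is still referenced by live events, this returns ({1, 2}, {3}): groups 1 and 2 can be dropped outright, while group 3 must first be rewritten without its deltas.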
--- CHANGES.md | 4 ---- UPGRADE.rst | 4 ---- 2 files changed, 8 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 5f598559a0..fb98c934c0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -11,10 +11,6 @@ If you have email notifications enabled, you should ensure that have installed customised templates, or leave it unset to use the default templates. -The configuration parser will try to detect the situation where -`email.template_dir` is incorrectly set to `res/templates` and do the right -thing, but will warn about this. - Synapse 0.33.7rc2 (2018-10-17) ============================== diff --git a/UPGRADE.rst b/UPGRADE.rst index 201d298123..55c77eedde 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -61,10 +61,6 @@ If you have email notifications enabled, you should ensure that have installed customised templates, or leave it unset to use the default templates. -The configuration parser will try to detect the situation where -``email.template_dir`` is incorrectly set to ``res/templates`` and do the right -thing, but will warn about this. - Upgrading to v0.27.3 ==================== -- cgit 1.4.1 From e5b52d0f945962a5f4bef5e7a2fb6445dfc0d8f8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 19 Oct 2018 21:49:26 +0100 Subject: Make psutil an explicit dependency As of #4027, we require psutil to be installed, so it should be in our dependency list. We can also remove some of the conditional import code introduced by #992. Fixes #4062. --- changelog.d/4073.misc | 1 + synapse/app/homeserver.py | 11 ++++------- synapse/python_dependencies.py | 4 +--- 3 files changed, 6 insertions(+), 10 deletions(-) create mode 100644 changelog.d/4073.misc diff --git a/changelog.d/4073.misc b/changelog.d/4073.misc new file mode 100644 index 0000000000..fc304bef06 --- /dev/null +++ b/changelog.d/4073.misc @@ -0,0 +1 @@ +Add psutil as an explicit dependency diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index e3f0d99a3f..a0c0fc78ec 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -20,6 +20,7 @@ import sys from six import iteritems +import psutil from prometheus_client import Gauge from twisted.application import service @@ -502,7 +503,6 @@ def run(hs): def performance_stats_init(): try: - import psutil process = psutil.Process() # Ensure we can fetch both, and make the initial request for cpu_percent # so the next request will use this as the initial point. @@ -510,12 +510,9 @@ def run(hs): process.cpu_percent(interval=None) logger.info("report_stats can use psutil") stats_process.append(process) - except (ImportError, AttributeError): - logger.warn( - "report_stats enabled but psutil is not installed or incorrect version." - " Disabling reporting of memory/cpu stats." - " Ensuring psutil is available will help matrix.org track performance" - " changes across releases." + except (AttributeError): + logger.warning( + "Unable to read memory/cpu stats. Disabling reporting." 
        )

    def generate_user_daily_visit_stats():
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index f51184b50d..943876456b 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -53,6 +53,7 @@ REQUIREMENTS = {
     "pillow>=3.1.2": ["PIL"],
     "pydenticon>=0.2": ["pydenticon"],
     "sortedcontainers>=1.4.4": ["sortedcontainers"],
+    "psutil>=2.0.0": ["psutil>=2.0.0"],
     "pysaml2>=3.0.0": ["saml2"],
     "pymacaroons-pynacl>=0.9.3": ["pymacaroons"],
     "msgpack-python>=0.4.2": ["msgpack"],
@@ -79,9 +80,6 @@ CONDITIONAL_REQUIREMENTS = {
     "matrix-synapse-ldap3": {
         "matrix-synapse-ldap3>=0.1": ["ldap_auth_provider"],
     },
-    "psutil": {
-        "psutil>=2.0.0": ["psutil>=2.0.0"],
-    },
     "postgres": {
         "psycopg2>=2.6": ["psycopg2"]
     }
-- cgit 1.4.1

From b85fe45f46abd7ff1c42851ab6dbb2f4ff33758f Mon Sep 17 00:00:00 2001
From: steamport
Date: Fri, 19 Oct 2018 21:55:38 +0000
Subject: Add CL

---
 changelog.d/4072.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/4072.misc

diff --git a/changelog.d/4072.misc b/changelog.d/4072.misc
new file mode 100644
index 0000000000..9d7279fd2b
--- /dev/null
+++ b/changelog.d/4072.misc
@@ -0,0 +1 @@
+The README now contains an example for the Caddy web server. Contributed by steamp0rt.
-- cgit 1.4.1

From 08760b0d9ac7d1ecf48b6aaef662cf346da7368d Mon Sep 17 00:00:00 2001
From: steamport
Date: Fri, 19 Oct 2018 21:57:28 +0000
Subject: Fix formatting.

---
 README.rst | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.rst b/README.rst
index de133f674d..bb99f4fe2b 100644
--- a/README.rst
+++ b/README.rst
@@ -684,6 +684,7 @@ so an example nginx configuration might look like::
     }

 an example caddy configuration may look like::
+
     proxy /_matrix http://localhost:8008 {
         transparent
     }
-- cgit 1.4.1

From 9c2f99a3b74ebd84c14314344bf7ff785b8ff1d1 Mon Sep 17 00:00:00 2001
From: steamport
Date: Fri, 19 Oct 2018 21:59:14 +0000
Subject: Fix

---
 README.rst | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/README.rst b/README.rst
index bb99f4fe2b..b42a96a34c 100644
--- a/README.rst
+++ b/README.rst
@@ -684,9 +684,10 @@ so an example nginx configuration might look like::
     }

 an example caddy configuration may look like::
-
-    proxy /_matrix http://localhost:8008 {
-        transparent
+    example.com {
+      proxy /_matrix http://localhost:8008 {
+        transparent
+      }
     }

 and an example apache configuration may look like::
-- cgit 1.4.1

From 3f357583ce17e0816acbc373bc2179293212ba4e Mon Sep 17 00:00:00 2001
From: steamport
Date: Fri, 19 Oct 2018 21:59:39 +0000
Subject: I HATE RST

---
 README.rst | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.rst b/README.rst
index b42a96a34c..df9430f4cc 100644
--- a/README.rst
+++ b/README.rst
@@ -684,6 +684,7 @@ so an example nginx configuration might look like::
     }

 an example caddy configuration may look like::
+
     example.com {
       proxy /_matrix http://localhost:8008 {
         transparent
-- cgit 1.4.1

From 5c3d6ea9c72125ad152823e73f52667e845e6a61 Mon Sep 17 00:00:00 2001
From: steamport
Date: Fri, 19 Oct 2018 22:00:27 +0000
Subject: Whoops!
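For context on the psutil commit above: with psutil now an unconditional dependency, the stats hook can sample the current process directly. A small, self-contained sketch of the sampling pattern `performance_stats_init` relies on (the one-second loop and the printed field names here are illustrative, not Synapse's actual reporting schema)::

    import time

    import psutil


    def sample_stats(process):
        # memory_info().rss is resident memory in bytes; cpu_percent(interval=None)
        # measures against the previous call, so the first reading only primes it.
        return process.memory_info().rss, process.cpu_percent(interval=None)


    if __name__ == "__main__":
        proc = psutil.Process()  # the current process
        sample_stats(proc)       # initial request, so the next call has a baseline
        time.sleep(1)
        rss, cpu = sample_stats(proc)
        print("rss=%d bytes cpu=%.1f%%" % (rss, cpu))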
--- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index df9430f4cc..209313ba37 100644 --- a/README.rst +++ b/README.rst @@ -685,7 +685,7 @@ so an example nginx configuration might look like:: an example caddy configuration may look like:: - example.com { + matrix.example.com { proxy /_matrix http://localhost:8008 { transparent } -- cgit 1.4.1 From e1728dfcbe585edfb590bce50adeaab341a70db8 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Sat, 20 Oct 2018 11:16:55 +1100 Subject: Make scripts/ and scripts-dev/ pass pyflakes (and the rest of the codebase on py3) (#4068) --- .travis.yml | 2 +- changelog.d/4068.misc | 1 + scripts-dev/check_auth.py | 36 ++-- scripts-dev/check_event_hash.py | 32 ++-- scripts-dev/check_signature.py | 36 ++-- scripts-dev/convert_server_keys.py | 40 +++-- scripts-dev/definitions.py | 54 +++--- scripts-dev/dump_macaroon.py | 13 +- scripts-dev/federation_client.py | 99 +++++------ scripts-dev/hash_history.py | 62 ++++--- scripts-dev/list_url_patterns.py | 16 +- scripts-dev/tail-synapse.py | 24 +-- scripts/hash_password | 5 +- scripts/move_remote_media_to_new_store.py | 36 ++-- scripts/register_new_matrix_user | 76 ++++---- scripts/synapse_port_db | 272 +++++++++++++---------------- synapse/app/homeserver.py | 2 +- synapse/config/__main__.py | 2 +- synapse/config/_base.py | 121 +++++++------ synapse/storage/_base.py | 4 +- synapse/storage/keys.py | 2 +- synapse/storage/pusher.py | 2 +- synapse/storage/signatures.py | 2 +- synapse/storage/transactions.py | 2 +- synapse/util/caches/stream_change_cache.py | 4 +- synctl | 80 ++++----- tox.ini | 4 +- 27 files changed, 511 insertions(+), 518 deletions(-) create mode 100644 changelog.d/4068.misc diff --git a/.travis.yml b/.travis.yml index 2077f6af72..7ee1a278db 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,7 +14,7 @@ matrix: - python: 2.7 env: TOX_ENV=packaging - - python: 2.7 + - python: 3.6 env: TOX_ENV=pep8 - python: 2.7 diff --git a/changelog.d/4068.misc b/changelog.d/4068.misc new file mode 100644 index 0000000000..db6c4ade59 --- /dev/null +++ b/changelog.d/4068.misc @@ -0,0 +1 @@ +Make the Python scripts in the top-level scripts folders meet pep8 and pass flake8. 
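The bulk of this commit follows, and it applies the same mechanical py2/py3 fixes to every script: print statements become the print() function (via the __future__ import, so the files still run on Python 2), bare except: becomes except Exception:, import blocks are regrouped, and raw_input is reached through six. A condensed sketch of the combined pattern, with illustrative names::

    from __future__ import print_function  # print() works on Python 2 and 3

    import logging

    from six.moves import input  # py2 raw_input / py3 input under one name

    logger = logging.getLogger(__name__)


    def prompt_for_port(default=8448):
        raw = input("Port [%d]: " % (default,)) or default
        try:
            return int(raw)
        except ValueError:
            # a specific exception class replaces the bare `except:` that
            # flake8 rejects (E722)
            logger.warning("invalid port %r, using default", raw)
            return default


    if __name__ == "__main__":
        print("listening on port %d" % (prompt_for_port(),))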
diff --git a/scripts-dev/check_auth.py b/scripts-dev/check_auth.py index 4fa8792a5f..b3d11f49ec 100644 --- a/scripts-dev/check_auth.py +++ b/scripts-dev/check_auth.py @@ -1,21 +1,20 @@ -from synapse.events import FrozenEvent -from synapse.api.auth import Auth - -from mock import Mock +from __future__ import print_function import argparse import itertools import json import sys +from mock import Mock + +from synapse.api.auth import Auth +from synapse.events import FrozenEvent + def check_auth(auth, auth_chain, events): auth_chain.sort(key=lambda e: e.depth) - auth_map = { - e.event_id: e - for e in auth_chain - } + auth_map = {e.event_id: e for e in auth_chain} create_events = {} for e in auth_chain: @@ -25,31 +24,26 @@ def check_auth(auth, auth_chain, events): for e in itertools.chain(auth_chain, events): auth_events_list = [auth_map[i] for i, _ in e.auth_events] - auth_events = { - (e.type, e.state_key): e - for e in auth_events_list - } + auth_events = {(e.type, e.state_key): e for e in auth_events_list} auth_events[("m.room.create", "")] = create_events[e.room_id] try: auth.check(e, auth_events=auth_events) except Exception as ex: - print "Failed:", e.event_id, e.type, e.state_key - print "Auth_events:", auth_events - print ex - print json.dumps(e.get_dict(), sort_keys=True, indent=4) + print("Failed:", e.event_id, e.type, e.state_key) + print("Auth_events:", auth_events) + print(ex) + print(json.dumps(e.get_dict(), sort_keys=True, indent=4)) # raise - print "Success:", e.event_id, e.type, e.state_key + print("Success:", e.event_id, e.type, e.state_key) + if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( - 'json', - nargs='?', - type=argparse.FileType('r'), - default=sys.stdin, + 'json', nargs='?', type=argparse.FileType('r'), default=sys.stdin ) args = parser.parse_args() diff --git a/scripts-dev/check_event_hash.py b/scripts-dev/check_event_hash.py index 7ccae34d48..8535f99697 100644 --- a/scripts-dev/check_event_hash.py +++ b/scripts-dev/check_event_hash.py @@ -1,10 +1,15 @@ -from synapse.crypto.event_signing import * -from unpaddedbase64 import encode_base64 - import argparse import hashlib -import sys import json +import logging +import sys + +from unpaddedbase64 import encode_base64 + +from synapse.crypto.event_signing import ( + check_event_content_hash, + compute_event_reference_hash, +) class dictobj(dict): @@ -24,27 +29,26 @@ class dictobj(dict): def main(): parser = argparse.ArgumentParser() - parser.add_argument("input_json", nargs="?", type=argparse.FileType('r'), - default=sys.stdin) + parser.add_argument( + "input_json", nargs="?", type=argparse.FileType('r'), default=sys.stdin + ) args = parser.parse_args() logging.basicConfig() event_json = dictobj(json.load(args.input_json)) - algorithms = { - "sha256": hashlib.sha256, - } + algorithms = {"sha256": hashlib.sha256} for alg_name in event_json.hashes: if check_event_content_hash(event_json, algorithms[alg_name]): - print "PASS content hash %s" % (alg_name,) + print("PASS content hash %s" % (alg_name,)) else: - print "FAIL content hash %s" % (alg_name,) + print("FAIL content hash %s" % (alg_name,)) for algorithm in algorithms.values(): name, h_bytes = compute_event_reference_hash(event_json, algorithm) - print "Reference hash %s: %s" % (name, encode_base64(h_bytes)) + print("Reference hash %s: %s" % (name, encode_base64(h_bytes))) -if __name__=="__main__": - main() +if __name__ == "__main__": + main() diff --git a/scripts-dev/check_signature.py b/scripts-dev/check_signature.py index 
079577908a..612f17ca7f 100644 --- a/scripts-dev/check_signature.py +++ b/scripts-dev/check_signature.py @@ -1,15 +1,15 @@ -from signedjson.sign import verify_signed_json -from signedjson.key import decode_verify_key_bytes, write_signing_keys -from unpaddedbase64 import decode_base64 - -import urllib2 +import argparse import json +import logging import sys +import urllib2 + import dns.resolver -import pprint -import argparse -import logging +from signedjson.key import decode_verify_key_bytes, write_signing_keys +from signedjson.sign import verify_signed_json +from unpaddedbase64 import decode_base64 + def get_targets(server_name): if ":" in server_name: @@ -23,6 +23,7 @@ def get_targets(server_name): except dns.resolver.NXDOMAIN: yield (server_name, 8448) + def get_server_keys(server_name, target, port): url = "https://%s:%i/_matrix/key/v1" % (target, port) keys = json.load(urllib2.urlopen(url)) @@ -33,12 +34,14 @@ def get_server_keys(server_name, target, port): verify_keys[key_id] = verify_key return verify_keys + def main(): parser = argparse.ArgumentParser() parser.add_argument("signature_name") - parser.add_argument("input_json", nargs="?", type=argparse.FileType('r'), - default=sys.stdin) + parser.add_argument( + "input_json", nargs="?", type=argparse.FileType('r'), default=sys.stdin + ) args = parser.parse_args() logging.basicConfig() @@ -48,24 +51,23 @@ def main(): for target, port in get_targets(server_name): try: keys = get_server_keys(server_name, target, port) - print "Using keys from https://%s:%s/_matrix/key/v1" % (target, port) + print("Using keys from https://%s:%s/_matrix/key/v1" % (target, port)) write_signing_keys(sys.stdout, keys.values()) break - except: + except Exception: logging.exception("Error talking to %s:%s", target, port) json_to_check = json.load(args.input_json) - print "Checking JSON:" + print("Checking JSON:") for key_id in json_to_check["signatures"][args.signature_name]: try: key = keys[key_id] verify_signed_json(json_to_check, args.signature_name, key) - print "PASS %s" % (key_id,) - except: + print("PASS %s" % (key_id,)) + except Exception: logging.exception("Check for key %s failed" % (key_id,)) - print "FAIL %s" % (key_id,) + print("FAIL %s" % (key_id,)) if __name__ == '__main__': main() - diff --git a/scripts-dev/convert_server_keys.py b/scripts-dev/convert_server_keys.py index 151551f22c..dde8596697 100644 --- a/scripts-dev/convert_server_keys.py +++ b/scripts-dev/convert_server_keys.py @@ -1,13 +1,21 @@ -import psycopg2 -import yaml -import sys +import hashlib import json +import sys import time -import hashlib -from unpaddedbase64 import encode_base64 + +import six + +import psycopg2 +import yaml +from canonicaljson import encode_canonical_json from signedjson.key import read_signing_keys from signedjson.sign import sign_json -from canonicaljson import encode_canonical_json +from unpaddedbase64 import encode_base64 + +if six.PY2: + db_type = six.moves.builtins.buffer +else: + db_type = memoryview def select_v1_keys(connection): @@ -39,7 +47,9 @@ def select_v2_json(connection): cursor.close() results = {} for server_name, key_id, key_json in rows: - results.setdefault(server_name, {})[key_id] = json.loads(str(key_json).decode("utf-8")) + results.setdefault(server_name, {})[key_id] = json.loads( + str(key_json).decode("utf-8") + ) return results @@ -47,10 +57,7 @@ def convert_v1_to_v2(server_name, valid_until, keys, certificate): return { "old_verify_keys": {}, "server_name": server_name, - "verify_keys": { - key_id: {"key": key} - for key_id, key in 
keys.items() - }, + "verify_keys": {key_id: {"key": key} for key_id, key in keys.items()}, "valid_until_ts": valid_until, "tls_fingerprints": [fingerprint(certificate)], } @@ -65,7 +72,7 @@ def rows_v2(server, json): valid_until = json["valid_until_ts"] key_json = encode_canonical_json(json) for key_id in json["verify_keys"]: - yield (server, key_id, "-", valid_until, valid_until, buffer(key_json)) + yield (server, key_id, "-", valid_until, valid_until, db_type(key_json)) def main(): @@ -87,7 +94,7 @@ def main(): result = {} for server in keys: - if not server in json: + if server not in json: v2_json = convert_v1_to_v2( server, valid_until, keys[server], certificates[server] ) @@ -96,10 +103,7 @@ def main(): yaml.safe_dump(result, sys.stdout, default_flow_style=False) - rows = list( - row for server, json in result.items() - for row in rows_v2(server, json) - ) + rows = list(row for server, json in result.items() for row in rows_v2(server, json)) cursor = connection.cursor() cursor.executemany( @@ -107,7 +111,7 @@ def main(): " server_name, key_id, from_server," " ts_added_ms, ts_valid_until_ms, key_json" ") VALUES (%s, %s, %s, %s, %s, %s)", - rows + rows, ) connection.commit() diff --git a/scripts-dev/definitions.py b/scripts-dev/definitions.py index 47dac7772d..1deb0fe2b7 100755 --- a/scripts-dev/definitions.py +++ b/scripts-dev/definitions.py @@ -1,8 +1,16 @@ #! /usr/bin/python +from __future__ import print_function + +import argparse import ast +import os +import re +import sys + import yaml + class DefinitionVisitor(ast.NodeVisitor): def __init__(self): super(DefinitionVisitor, self).__init__() @@ -42,15 +50,18 @@ def non_empty(defs): functions = {name: non_empty(f) for name, f in defs['def'].items()} classes = {name: non_empty(f) for name, f in defs['class'].items()} result = {} - if functions: result['def'] = functions - if classes: result['class'] = classes + if functions: + result['def'] = functions + if classes: + result['class'] = classes names = defs['names'] uses = [] for name in names.get('Load', ()): if name not in names.get('Param', ()) and name not in names.get('Store', ()): uses.append(name) uses.extend(defs['attrs']) - if uses: result['uses'] = uses + if uses: + result['uses'] = uses result['names'] = names result['attrs'] = defs['attrs'] return result @@ -95,7 +106,6 @@ def used_names(prefix, item, defs, names): if __name__ == '__main__': - import sys, os, argparse, re parser = argparse.ArgumentParser(description='Find definitions.') parser.add_argument( @@ -105,24 +115,28 @@ if __name__ == '__main__': "--ignore", action="append", metavar="REGEXP", help="Ignore a pattern" ) parser.add_argument( - "--pattern", action="append", metavar="REGEXP", - help="Search for a pattern" + "--pattern", action="append", metavar="REGEXP", help="Search for a pattern" ) parser.add_argument( - "directories", nargs='+', metavar="DIR", - help="Directories to search for definitions" + "directories", + nargs='+', + metavar="DIR", + help="Directories to search for definitions", ) parser.add_argument( - "--referrers", default=0, type=int, - help="Include referrers up to the given depth" + "--referrers", + default=0, + type=int, + help="Include referrers up to the given depth", ) parser.add_argument( - "--referred", default=0, type=int, - help="Include referred down to the given depth" + "--referred", + default=0, + type=int, + help="Include referred down to the given depth", ) parser.add_argument( - "--format", default="yaml", - help="Output format, one of 'yaml' or 'dot'" + "--format", 
default="yaml", help="Output format, one of 'yaml' or 'dot'" ) args = parser.parse_args() @@ -162,7 +176,7 @@ if __name__ == '__main__': for used_by in entry.get("used", ()): referrers.add(used_by) for name, definition in names.items(): - if not name in referrers: + if name not in referrers: continue if ignore and any(pattern.match(name) for pattern in ignore): continue @@ -176,7 +190,7 @@ if __name__ == '__main__': for uses in entry.get("uses", ()): referred.add(uses) for name, definition in names.items(): - if not name in referred: + if name not in referred: continue if ignore and any(pattern.match(name) for pattern in ignore): continue @@ -185,12 +199,12 @@ if __name__ == '__main__': if args.format == 'yaml': yaml.dump(result, sys.stdout, default_flow_style=False) elif args.format == 'dot': - print "digraph {" + print("digraph {") for name, entry in result.items(): - print name + print(name) for used_by in entry.get("used", ()): if used_by in result: - print used_by, "->", name - print "}" + print(used_by, "->", name) + print("}") else: raise ValueError("Unknown format %r" % (args.format)) diff --git a/scripts-dev/dump_macaroon.py b/scripts-dev/dump_macaroon.py index fcc5568835..22b30fa78e 100755 --- a/scripts-dev/dump_macaroon.py +++ b/scripts-dev/dump_macaroon.py @@ -1,8 +1,11 @@ #!/usr/bin/env python2 -import pymacaroons +from __future__ import print_function + import sys +import pymacaroons + if len(sys.argv) == 1: sys.stderr.write("usage: %s macaroon [key]\n" % (sys.argv[0],)) sys.exit(1) @@ -11,14 +14,14 @@ macaroon_string = sys.argv[1] key = sys.argv[2] if len(sys.argv) > 2 else None macaroon = pymacaroons.Macaroon.deserialize(macaroon_string) -print macaroon.inspect() +print(macaroon.inspect()) -print "" +print("") verifier = pymacaroons.Verifier() verifier.satisfy_general(lambda c: True) try: verifier.verify(macaroon, key) - print "Signature is correct" + print("Signature is correct") except Exception as e: - print str(e) + print(str(e)) diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py index d2acc7654d..2566ce7cef 100755 --- a/scripts-dev/federation_client.py +++ b/scripts-dev/federation_client.py @@ -18,21 +18,21 @@ from __future__ import print_function import argparse +import base64 +import json +import sys from urlparse import urlparse, urlunparse import nacl.signing -import json -import base64 import requests -import sys - -from requests.adapters import HTTPAdapter import srvlookup import yaml +from requests.adapters import HTTPAdapter # uncomment the following to enable debug logging of http requests -#from httplib import HTTPConnection -#HTTPConnection.debuglevel = 1 +# from httplib import HTTPConnection +# HTTPConnection.debuglevel = 1 + def encode_base64(input_bytes): """Encode bytes as a base64 string without any padding.""" @@ -58,15 +58,15 @@ def decode_base64(input_string): def encode_canonical_json(value): return json.dumps( - value, - # Encode code-points outside of ASCII as UTF-8 rather than \u escapes - ensure_ascii=False, - # Remove unecessary white space. - separators=(',',':'), - # Sort the keys of dictionaries. - sort_keys=True, - # Encode the resulting unicode as UTF-8 bytes. - ).encode("UTF-8") + value, + # Encode code-points outside of ASCII as UTF-8 rather than \u escapes + ensure_ascii=False, + # Remove unecessary white space. + separators=(',', ':'), + # Sort the keys of dictionaries. + sort_keys=True, + # Encode the resulting unicode as UTF-8 bytes. 
+ ).encode("UTF-8") def sign_json(json_object, signing_key, signing_name): @@ -88,6 +88,7 @@ def sign_json(json_object, signing_key, signing_name): NACL_ED25519 = "ed25519" + def decode_signing_key_base64(algorithm, version, key_base64): """Decode a base64 encoded signing key Args: @@ -143,14 +144,12 @@ def request_json(method, origin_name, origin_key, destination, path, content): authorization_headers = [] for key, sig in signed_json["signatures"][origin_name].items(): - header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % ( - origin_name, key, sig, - ) + header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (origin_name, key, sig) authorization_headers.append(bytes(header)) - print ("Authorization: %s" % header, file=sys.stderr) + print("Authorization: %s" % header, file=sys.stderr) dest = "matrix://%s%s" % (destination, path) - print ("Requesting %s" % dest, file=sys.stderr) + print("Requesting %s" % dest, file=sys.stderr) s = requests.Session() s.mount("matrix://", MatrixConnectionAdapter()) @@ -158,10 +157,7 @@ def request_json(method, origin_name, origin_key, destination, path, content): result = s.request( method=method, url=dest, - headers={ - "Host": destination, - "Authorization": authorization_headers[0] - }, + headers={"Host": destination, "Authorization": authorization_headers[0]}, verify=False, data=content, ) @@ -171,50 +167,50 @@ def request_json(method, origin_name, origin_key, destination, path, content): def main(): parser = argparse.ArgumentParser( - description= - "Signs and sends a federation request to a matrix homeserver", + description="Signs and sends a federation request to a matrix homeserver" ) parser.add_argument( - "-N", "--server-name", + "-N", + "--server-name", help="Name to give as the local homeserver. If unspecified, will be " - "read from the config file.", + "read from the config file.", ) parser.add_argument( - "-k", "--signing-key-path", + "-k", + "--signing-key-path", help="Path to the file containing the private ed25519 key to sign the " - "request with.", + "request with.", ) parser.add_argument( - "-c", "--config", + "-c", + "--config", default="homeserver.yaml", help="Path to server config file. Ignored if --server-name and " - "--signing-key-path are both given.", + "--signing-key-path are both given.", ) parser.add_argument( - "-d", "--destination", + "-d", + "--destination", default="matrix.org", help="name of the remote homeserver. We will do SRV lookups and " - "connect appropriately.", + "connect appropriately.", ) parser.add_argument( - "-X", "--method", + "-X", + "--method", help="HTTP method to use for the request. Defaults to GET if --data is" - "unspecified, POST if it is." + "unspecified, POST if it is.", ) - parser.add_argument( - "--body", - help="Data to send as the body of the HTTP request" - ) + parser.add_argument("--body", help="Data to send as the body of the HTTP request") parser.add_argument( - "path", - help="request path. We will add '/_matrix/federation/v1/' to this." + "path", help="request path. We will add '/_matrix/federation/v1/' to this." 
) args = parser.parse_args() @@ -227,13 +223,15 @@ def main(): result = request_json( args.method, - args.server_name, key, args.destination, + args.server_name, + key, + args.destination, "/_matrix/federation/v1/" + args.path, content=args.body, ) json.dump(result, sys.stdout) - print ("") + print("") def read_args_from_config(args): @@ -253,7 +251,7 @@ class MatrixConnectionAdapter(HTTPAdapter): return s, 8448 if ":" in s: - out = s.rsplit(":",1) + out = s.rsplit(":", 1) try: port = int(out[1]) except ValueError: @@ -263,7 +261,7 @@ class MatrixConnectionAdapter(HTTPAdapter): try: srv = srvlookup.lookup("matrix", "tcp", s)[0] return srv.host, srv.port - except: + except Exception: return s, 8448 def get_connection(self, url, proxies=None): @@ -272,10 +270,9 @@ class MatrixConnectionAdapter(HTTPAdapter): (host, port) = self.lookup(parsed.netloc) netloc = "%s:%d" % (host, port) print("Connecting to %s" % (netloc,), file=sys.stderr) - url = urlunparse(( - "https", netloc, parsed.path, parsed.params, parsed.query, - parsed.fragment, - )) + url = urlunparse( + ("https", netloc, parsed.path, parsed.params, parsed.query, parsed.fragment) + ) return super(MatrixConnectionAdapter, self).get_connection(url, proxies) diff --git a/scripts-dev/hash_history.py b/scripts-dev/hash_history.py index 616d6a10e7..514d80fa60 100644 --- a/scripts-dev/hash_history.py +++ b/scripts-dev/hash_history.py @@ -1,23 +1,31 @@ -from synapse.storage.pdu import PduStore -from synapse.storage.signatures import SignatureStore -from synapse.storage._base import SQLBaseStore -from synapse.federation.units import Pdu -from synapse.crypto.event_signing import ( - add_event_pdu_content_hash, compute_pdu_event_reference_hash -) -from synapse.api.events.utils import prune_pdu -from unpaddedbase64 import encode_base64, decode_base64 -from canonicaljson import encode_canonical_json +from __future__ import print_function + import sqlite3 import sys +from unpaddedbase64 import decode_base64, encode_base64 + +from synapse.crypto.event_signing import ( + add_event_pdu_content_hash, + compute_pdu_event_reference_hash, +) +from synapse.federation.units import Pdu +from synapse.storage._base import SQLBaseStore +from synapse.storage.pdu import PduStore +from synapse.storage.signatures import SignatureStore + + class Store(object): _get_pdu_tuples = PduStore.__dict__["_get_pdu_tuples"] _get_pdu_content_hashes_txn = SignatureStore.__dict__["_get_pdu_content_hashes_txn"] _get_prev_pdu_hashes_txn = SignatureStore.__dict__["_get_prev_pdu_hashes_txn"] - _get_pdu_origin_signatures_txn = SignatureStore.__dict__["_get_pdu_origin_signatures_txn"] + _get_pdu_origin_signatures_txn = SignatureStore.__dict__[ + "_get_pdu_origin_signatures_txn" + ] _store_pdu_content_hash_txn = SignatureStore.__dict__["_store_pdu_content_hash_txn"] - _store_pdu_reference_hash_txn = SignatureStore.__dict__["_store_pdu_reference_hash_txn"] + _store_pdu_reference_hash_txn = SignatureStore.__dict__[ + "_store_pdu_reference_hash_txn" + ] _store_prev_pdu_hash_txn = SignatureStore.__dict__["_store_prev_pdu_hash_txn"] _simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"] @@ -26,9 +34,7 @@ store = Store() def select_pdus(cursor): - cursor.execute( - "SELECT pdu_id, origin FROM pdus ORDER BY depth ASC" - ) + cursor.execute("SELECT pdu_id, origin FROM pdus ORDER BY depth ASC") ids = cursor.fetchall() @@ -41,23 +47,30 @@ def select_pdus(cursor): for pdu in pdus: try: if pdu.prev_pdus: - print "PROCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus + print("PROCESS", pdu.pdu_id, 
pdu.origin, pdu.prev_pdus) for pdu_id, origin, hashes in pdu.prev_pdus: ref_alg, ref_hsh = reference_hashes[(pdu_id, origin)] hashes[ref_alg] = encode_base64(ref_hsh) - store._store_prev_pdu_hash_txn(cursor, pdu.pdu_id, pdu.origin, pdu_id, origin, ref_alg, ref_hsh) - print "SUCCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus + store._store_prev_pdu_hash_txn( + cursor, pdu.pdu_id, pdu.origin, pdu_id, origin, ref_alg, ref_hsh + ) + print("SUCCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus) pdu = add_event_pdu_content_hash(pdu) ref_alg, ref_hsh = compute_pdu_event_reference_hash(pdu) reference_hashes[(pdu.pdu_id, pdu.origin)] = (ref_alg, ref_hsh) - store._store_pdu_reference_hash_txn(cursor, pdu.pdu_id, pdu.origin, ref_alg, ref_hsh) + store._store_pdu_reference_hash_txn( + cursor, pdu.pdu_id, pdu.origin, ref_alg, ref_hsh + ) for alg, hsh_base64 in pdu.hashes.items(): - print alg, hsh_base64 - store._store_pdu_content_hash_txn(cursor, pdu.pdu_id, pdu.origin, alg, decode_base64(hsh_base64)) + print(alg, hsh_base64) + store._store_pdu_content_hash_txn( + cursor, pdu.pdu_id, pdu.origin, alg, decode_base64(hsh_base64) + ) + + except Exception: + print("FAILED_", pdu.pdu_id, pdu.origin, pdu.prev_pdus) - except: - print "FAILED_", pdu.pdu_id, pdu.origin, pdu.prev_pdus def main(): conn = sqlite3.connect(sys.argv[1]) @@ -65,5 +78,6 @@ def main(): select_pdus(cursor) conn.commit() -if __name__=='__main__': + +if __name__ == '__main__': main() diff --git a/scripts-dev/list_url_patterns.py b/scripts-dev/list_url_patterns.py index 58d40c4ff4..da027be26e 100755 --- a/scripts-dev/list_url_patterns.py +++ b/scripts-dev/list_url_patterns.py @@ -1,18 +1,17 @@ #! /usr/bin/python -import ast import argparse +import ast import os import sys + import yaml PATTERNS_V1 = [] PATTERNS_V2 = [] -RESULT = { - "v1": PATTERNS_V1, - "v2": PATTERNS_V2, -} +RESULT = {"v1": PATTERNS_V1, "v2": PATTERNS_V2} + class CallVisitor(ast.NodeVisitor): def visit_Call(self, node): @@ -21,7 +20,6 @@ class CallVisitor(ast.NodeVisitor): else: return - if name == "client_path_patterns": PATTERNS_V1.append(node.args[0].s) elif name == "client_v2_patterns": @@ -42,8 +40,10 @@ def find_patterns_in_file(filepath): parser = argparse.ArgumentParser(description='Find url patterns.') parser.add_argument( - "directories", nargs='+', metavar="DIR", - help="Directories to search for definitions" + "directories", + nargs='+', + metavar="DIR", + help="Directories to search for definitions", ) args = parser.parse_args() diff --git a/scripts-dev/tail-synapse.py b/scripts-dev/tail-synapse.py index 18be711e92..1a36b94038 100644 --- a/scripts-dev/tail-synapse.py +++ b/scripts-dev/tail-synapse.py @@ -1,8 +1,9 @@ -import requests import collections +import json import sys import time -import json + +import requests Entry = collections.namedtuple("Entry", "name position rows") @@ -30,11 +31,11 @@ def parse_response(content): def replicate(server, streams): - return parse_response(requests.get( - server + "/_synapse/replication", - verify=False, - params=streams - ).content) + return parse_response( + requests.get( + server + "/_synapse/replication", verify=False, params=streams + ).content + ) def main(): @@ -45,7 +46,7 @@ def main(): try: streams = { row.name: row.position - for row in replicate(server, {"streams":"-1"})["streams"].rows + for row in replicate(server, {"streams": "-1"})["streams"].rows } except requests.exceptions.ConnectionError as e: time.sleep(0.1) @@ -53,8 +54,8 @@ def main(): while True: try: results = replicate(server, streams) - except: - 
sys.stdout.write("connection_lost("+ repr(streams) + ")\n") + except Exception: + sys.stdout.write("connection_lost(" + repr(streams) + ")\n") break for update in results.values(): for row in update.rows: @@ -62,6 +63,5 @@ def main(): streams[update.name] = update.position - -if __name__=='__main__': +if __name__ == '__main__': main() diff --git a/scripts/hash_password b/scripts/hash_password index 215ab25cfe..a62bb5aa83 100755 --- a/scripts/hash_password +++ b/scripts/hash_password @@ -1,12 +1,10 @@ #!/usr/bin/env python import argparse - +import getpass import sys import bcrypt -import getpass - import yaml bcrypt_rounds=12 @@ -52,4 +50,3 @@ if __name__ == "__main__": password = prompt_for_pass() print bcrypt.hashpw(password + password_pepper, bcrypt.gensalt(bcrypt_rounds)) - diff --git a/scripts/move_remote_media_to_new_store.py b/scripts/move_remote_media_to_new_store.py index 7914ead889..e630936f78 100755 --- a/scripts/move_remote_media_to_new_store.py +++ b/scripts/move_remote_media_to_new_store.py @@ -36,12 +36,9 @@ from __future__ import print_function import argparse import logging - -import sys - import os - import shutil +import sys from synapse.rest.media.v1.filepath import MediaFilePaths @@ -77,24 +74,23 @@ def move_media(origin_server, file_id, src_paths, dest_paths): if not os.path.exists(original_file): logger.warn( "Original for %s/%s (%s) does not exist", - origin_server, file_id, original_file, + origin_server, + file_id, + original_file, ) else: mkdir_and_move( - original_file, - dest_paths.remote_media_filepath(origin_server, file_id), + original_file, dest_paths.remote_media_filepath(origin_server, file_id) ) # now look for thumbnails - original_thumb_dir = src_paths.remote_media_thumbnail_dir( - origin_server, file_id, - ) + original_thumb_dir = src_paths.remote_media_thumbnail_dir(origin_server, file_id) if not os.path.exists(original_thumb_dir): return mkdir_and_move( original_thumb_dir, - dest_paths.remote_media_thumbnail_dir(origin_server, file_id) + dest_paths.remote_media_thumbnail_dir(origin_server, file_id), ) @@ -109,24 +105,16 @@ def mkdir_and_move(original_file, dest_file): if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class = argparse.RawDescriptionHelpFormatter, - ) - parser.add_argument( - "-v", action='store_true', help='enable debug logging') - parser.add_argument( - "src_repo", - help="Path to source content repo", - ) - parser.add_argument( - "dest_repo", - help="Path to source content repo", + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter ) + parser.add_argument("-v", action='store_true', help='enable debug logging') + parser.add_argument("src_repo", help="Path to source content repo") + parser.add_argument("dest_repo", help="Path to source content repo") args = parser.parse_args() logging_config = { "level": logging.DEBUG if args.v else logging.INFO, - "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s" + "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s", } logging.basicConfig(**logging_config) diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user index 91bdb3a25b..89143c5d59 100755 --- a/scripts/register_new_matrix_user +++ b/scripts/register_new_matrix_user @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import print_function

 import argparse
 import getpass
@@ -22,19 +23,23 @@ import hmac
 import json
 import sys
 import urllib2
+
+from six.moves import input
+
 import yaml


 def request_registration(user, password, server_location, shared_secret, admin=False):
     req = urllib2.Request(
         "%s/_matrix/client/r0/admin/register" % (server_location,),
-        headers={'Content-Type': 'application/json'}
+        headers={'Content-Type': 'application/json'},
     )

     try:
         if sys.version_info[:3] >= (2, 7, 9):
             # As of version 2.7.9, urllib2 now checks SSL certs
             import ssl
+
             f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
         else:
             f = urllib2.urlopen(req)
@@ -42,18 +47,15 @@ def request_registration(user, password, server_location, shared_secret, admin=F
         f.close()
         nonce = json.loads(body)["nonce"]
     except urllib2.HTTPError as e:
-        print "ERROR! Received %d %s" % (e.code, e.reason,)
+        print("ERROR! Received %d %s" % (e.code, e.reason))
         if 400 <= e.code < 500:
             if e.info().type == "application/json":
                 resp = json.load(e)
                 if "error" in resp:
-                    print resp["error"]
+                    print(resp["error"])
         sys.exit(1)

-    mac = hmac.new(
-        key=shared_secret,
-        digestmod=hashlib.sha1,
-    )
+    mac = hmac.new(key=shared_secret, digestmod=hashlib.sha1)

     mac.update(nonce)
     mac.update("\x00")
@@ -75,30 +77,31 @@ def request_registration(user, password, server_location, shared_secret, admin=F

     server_location = server_location.rstrip("/")

-    print "Sending registration request..."
+    print("Sending registration request...")

     req = urllib2.Request(
         "%s/_matrix/client/r0/admin/register" % (server_location,),
         data=json.dumps(data),
-        headers={'Content-Type': 'application/json'}
+        headers={'Content-Type': 'application/json'},
     )
     try:
         if sys.version_info[:3] >= (2, 7, 9):
             # As of version 2.7.9, urllib2 now checks SSL certs
             import ssl
+
             f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
         else:
             f = urllib2.urlopen(req)
         f.read()
         f.close()
-        print "Success."
+        print("Success.")
     except urllib2.HTTPError as e:
-        print "ERROR! Received %d %s" % (e.code, e.reason,)
+        print("ERROR! Received %d %s" % (e.code, e.reason))
         if 400 <= e.code < 500:
             if e.info().type == "application/json":
                 resp = json.load(e)
                 if "error" in resp:
-                    print resp["error"]
+                    print(resp["error"])
         sys.exit(1)


@@ -106,35 +109,35 @@ def register_new_user(user, password, server_location, shared_secret, admin):
     if not user:
         try:
             default_user = getpass.getuser()
-        except:
+        except Exception:
             default_user = None

         if default_user:
-            user = raw_input("New user localpart [%s]: " % (default_user,))
+            user = input("New user localpart [%s]: " % (default_user,))
             if not user:
                 user = default_user
         else:
-            user = raw_input("New user localpart: ")
+            user = input("New user localpart: ")

         if not user:
-            print "Invalid user name"
+            print("Invalid user name")
             sys.exit(1)

     if not password:
         password = getpass.getpass("Password: ")

         if not password:
-            print "Password cannot be blank."
+ print("Password cannot be blank.") sys.exit(1) confirm_password = getpass.getpass("Confirm password: ") if password != confirm_password: - print "Passwords do not match" + print("Passwords do not match") sys.exit(1) if admin is None: - admin = raw_input("Make admin [no]: ") + admin = input("Make admin [no]: ") if admin in ("y", "yes", "true"): admin = True else: @@ -146,42 +149,51 @@ def register_new_user(user, password, server_location, shared_secret, admin): if __name__ == "__main__": parser = argparse.ArgumentParser( description="Used to register new users with a given home server when" - " registration has been disabled. The home server must be" - " configured with the 'registration_shared_secret' option" - " set.", + " registration has been disabled. The home server must be" + " configured with the 'registration_shared_secret' option" + " set." ) parser.add_argument( - "-u", "--user", + "-u", + "--user", default=None, help="Local part of the new user. Will prompt if omitted.", ) parser.add_argument( - "-p", "--password", + "-p", + "--password", default=None, help="New password for user. Will prompt if omitted.", ) admin_group = parser.add_mutually_exclusive_group() admin_group.add_argument( - "-a", "--admin", + "-a", + "--admin", action="store_true", - help="Register new user as an admin. Will prompt if --no-admin is not set either.", + help=( + "Register new user as an admin. " + "Will prompt if --no-admin is not set either." + ), ) admin_group.add_argument( "--no-admin", action="store_true", - help="Register new user as a regular user. Will prompt if --admin is not set either.", + help=( + "Register new user as a regular user. " + "Will prompt if --admin is not set either." + ), ) group = parser.add_mutually_exclusive_group(required=True) group.add_argument( - "-c", "--config", + "-c", + "--config", type=argparse.FileType('r'), help="Path to server config file. Used to read in shared secret.", ) group.add_argument( - "-k", "--shared-secret", - help="Shared secret as defined in server config file.", + "-k", "--shared-secret", help="Shared secret as defined in server config file." ) parser.add_argument( @@ -189,7 +201,7 @@ if __name__ == "__main__": default="https://localhost:8448", nargs='?', help="URL to use to talk to the home server. Defaults to " - " 'https://localhost:8448'.", + " 'https://localhost:8448'.", ) args = parser.parse_args() @@ -198,7 +210,7 @@ if __name__ == "__main__": config = yaml.safe_load(args.config) secret = config.get("registration_shared_secret", None) if not secret: - print "No 'registration_shared_secret' defined in config." + print("No 'registration_shared_secret' defined in config.") sys.exit(1) else: secret = args.shared_secret diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index b9b828c154..2f6e69e552 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -15,23 +15,23 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from twisted.internet import defer, reactor -from twisted.enterprise import adbapi - -from synapse.storage._base import LoggingTransaction, SQLBaseStore -from synapse.storage.engines import create_engine -from synapse.storage.prepare_database import prepare_database - import argparse import curses import logging import sys import time import traceback -import yaml from six import string_types +import yaml + +from twisted.enterprise import adbapi +from twisted.internet import defer, reactor + +from synapse.storage._base import LoggingTransaction, SQLBaseStore +from synapse.storage.engines import create_engine +from synapse.storage.prepare_database import prepare_database logger = logging.getLogger("synapse_port_db") @@ -105,6 +105,7 @@ class Store(object): *All* database interactions should go through this object. """ + def __init__(self, db_pool, engine): self.db_pool = db_pool self.database_engine = engine @@ -135,7 +136,8 @@ class Store(object): txn = conn.cursor() return func( LoggingTransaction(txn, desc, self.database_engine, [], []), - *args, **kwargs + *args, + **kwargs ) except self.database_engine.module.DatabaseError as e: if self.database_engine.is_deadlock(e): @@ -158,22 +160,20 @@ class Store(object): def r(txn): txn.execute(sql, args) return txn.fetchall() + return self.runInteraction("execute_sql", r) def insert_many_txn(self, txn, table, headers, rows): sql = "INSERT INTO %s (%s) VALUES (%s)" % ( table, ", ".join(k for k in headers), - ", ".join("%s" for _ in headers) + ", ".join("%s" for _ in headers), ) try: txn.executemany(sql, rows) - except: - logger.exception( - "Failed to insert: %s", - table, - ) + except Exception: + logger.exception("Failed to insert: %s", table) raise @@ -206,7 +206,7 @@ class Porter(object): "table_name": table, "forward_rowid": 1, "backward_rowid": 0, - } + }, ) forward_chunk = 1 @@ -221,10 +221,10 @@ class Porter(object): table, forward_chunk, backward_chunk ) else: + def delete_all(txn): txn.execute( - "DELETE FROM port_from_sqlite3 WHERE table_name = %s", - (table,) + "DELETE FROM port_from_sqlite3 WHERE table_name = %s", (table,) ) txn.execute("TRUNCATE %s CASCADE" % (table,)) @@ -232,11 +232,7 @@ class Porter(object): yield self.postgres_store._simple_insert( table="port_from_sqlite3", - values={ - "table_name": table, - "forward_rowid": 1, - "backward_rowid": 0, - } + values={"table_name": table, "forward_rowid": 1, "backward_rowid": 0}, ) forward_chunk = 1 @@ -251,12 +247,16 @@ class Porter(object): ) @defer.inlineCallbacks - def handle_table(self, table, postgres_size, table_size, forward_chunk, - backward_chunk): + def handle_table( + self, table, postgres_size, table_size, forward_chunk, backward_chunk + ): logger.info( "Table %s: %i/%i (rows %i-%i) already ported", - table, postgres_size, table_size, - backward_chunk+1, forward_chunk-1, + table, + postgres_size, + table_size, + backward_chunk + 1, + forward_chunk - 1, ) if not table_size: @@ -271,7 +271,9 @@ class Porter(object): return if table in ( - "user_directory", "user_directory_search", "users_who_share_rooms", + "user_directory", + "user_directory_search", + "users_who_share_rooms", "users_in_pubic_room", ): # We don't port these tables, as they're a faff and we can regenreate @@ -283,37 +285,35 @@ class Porter(object): # We need to make sure there is a single row, `(X, null), as that is # what synapse expects to be there. 
yield self.postgres_store._simple_insert( - table=table, - values={"stream_id": None}, + table=table, values={"stream_id": None} ) self.progress.update(table, table_size) # Mark table as done return forward_select = ( - "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?" - % (table,) + "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?" % (table,) ) backward_select = ( - "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?" - % (table,) + "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?" % (table,) ) do_forward = [True] do_backward = [True] while True: + def r(txn): forward_rows = [] backward_rows = [] if do_forward[0]: - txn.execute(forward_select, (forward_chunk, self.batch_size,)) + txn.execute(forward_select, (forward_chunk, self.batch_size)) forward_rows = txn.fetchall() if not forward_rows: do_forward[0] = False if do_backward[0]: - txn.execute(backward_select, (backward_chunk, self.batch_size,)) + txn.execute(backward_select, (backward_chunk, self.batch_size)) backward_rows = txn.fetchall() if not backward_rows: do_backward[0] = False @@ -325,9 +325,7 @@ class Porter(object): return headers, forward_rows, backward_rows - headers, frows, brows = yield self.sqlite_store.runInteraction( - "select", r - ) + headers, frows, brows = yield self.sqlite_store.runInteraction("select", r) if frows or brows: if frows: @@ -339,9 +337,7 @@ class Porter(object): rows = self._convert_rows(table, headers, rows) def insert(txn): - self.postgres_store.insert_many_txn( - txn, table, headers[1:], rows - ) + self.postgres_store.insert_many_txn(txn, table, headers[1:], rows) self.postgres_store._simple_update_one_txn( txn, @@ -362,8 +358,9 @@ class Porter(object): return @defer.inlineCallbacks - def handle_search_table(self, postgres_size, table_size, forward_chunk, - backward_chunk): + def handle_search_table( + self, postgres_size, table_size, forward_chunk, backward_chunk + ): select = ( "SELECT es.rowid, es.*, e.origin_server_ts, e.stream_ordering" " FROM event_search as es" @@ -373,8 +370,9 @@ class Porter(object): ) while True: + def r(txn): - txn.execute(select, (forward_chunk, self.batch_size,)) + txn.execute(select, (forward_chunk, self.batch_size)) rows = txn.fetchall() headers = [column[0] for column in txn.description] @@ -402,18 +400,21 @@ class Porter(object): else: rows_dict.append(d) - txn.executemany(sql, [ - ( - row["event_id"], - row["room_id"], - row["key"], - row["sender"], - row["value"], - row["origin_server_ts"], - row["stream_ordering"], - ) - for row in rows_dict - ]) + txn.executemany( + sql, + [ + ( + row["event_id"], + row["room_id"], + row["key"], + row["sender"], + row["value"], + row["origin_server_ts"], + row["stream_ordering"], + ) + for row in rows_dict + ], + ) self.postgres_store._simple_update_one_txn( txn, @@ -437,7 +438,8 @@ class Porter(object): def setup_db(self, db_config, database_engine): db_conn = database_engine.module.connect( **{ - k: v for k, v in db_config.get("args", {}).items() + k: v + for k, v in db_config.get("args", {}).items() if not k.startswith("cp_") } ) @@ -450,13 +452,11 @@ class Porter(object): def run(self): try: sqlite_db_pool = adbapi.ConnectionPool( - self.sqlite_config["name"], - **self.sqlite_config["args"] + self.sqlite_config["name"], **self.sqlite_config["args"] ) postgres_db_pool = adbapi.ConnectionPool( - self.postgres_config["name"], - **self.postgres_config["args"] + self.postgres_config["name"], **self.postgres_config["args"] ) sqlite_engine = create_engine(sqlite_config) 
@@ -465,9 +465,7 @@ class Porter(object): self.sqlite_store = Store(sqlite_db_pool, sqlite_engine) self.postgres_store = Store(postgres_db_pool, postgres_engine) - yield self.postgres_store.execute( - postgres_engine.check_database - ) + yield self.postgres_store.execute(postgres_engine.check_database) # Step 1. Set up databases. self.progress.set_state("Preparing SQLite3") @@ -477,6 +475,7 @@ class Porter(object): self.setup_db(postgres_config, postgres_engine) self.progress.set_state("Creating port tables") + def create_port_table(txn): txn.execute( "CREATE TABLE IF NOT EXISTS port_from_sqlite3 (" @@ -501,9 +500,7 @@ class Porter(object): ) try: - yield self.postgres_store.runInteraction( - "alter_table", alter_table - ) + yield self.postgres_store.runInteraction("alter_table", alter_table) except Exception as e: pass @@ -514,11 +511,7 @@ class Porter(object): # Step 2. Get tables. self.progress.set_state("Fetching tables") sqlite_tables = yield self.sqlite_store._simple_select_onecol( - table="sqlite_master", - keyvalues={ - "type": "table", - }, - retcol="name", + table="sqlite_master", keyvalues={"type": "table"}, retcol="name" ) postgres_tables = yield self.postgres_store._simple_select_onecol( @@ -545,18 +538,14 @@ class Porter(object): # Step 4. Do the copying. self.progress.set_state("Copying to postgres") yield defer.gatherResults( - [ - self.handle_table(*res) - for res in setup_res - ], - consumeErrors=True, + [self.handle_table(*res) for res in setup_res], consumeErrors=True ) # Step 5. Do final post-processing yield self._setup_state_group_id_seq() self.progress.done() - except: + except Exception: global end_error_exec_info end_error_exec_info = sys.exc_info() logger.exception("") @@ -566,9 +555,7 @@ class Porter(object): def _convert_rows(self, table, headers, rows): bool_col_names = BOOLEAN_COLUMNS.get(table, []) - bool_cols = [ - i for i, h in enumerate(headers) if h in bool_col_names - ] + bool_cols = [i for i, h in enumerate(headers) if h in bool_col_names] class BadValueException(Exception): pass @@ -577,18 +564,21 @@ class Porter(object): if j in bool_cols: return bool(col) elif isinstance(col, string_types) and "\0" in col: - logger.warn("DROPPING ROW: NUL value in table %s col %s: %r", table, headers[j], col) - raise BadValueException(); + logger.warn( + "DROPPING ROW: NUL value in table %s col %s: %r", + table, + headers[j], + col, + ) + raise BadValueException() return col outrows = [] for i, row in enumerate(rows): try: - outrows.append(tuple( - conv(j, col) - for j, col in enumerate(row) - if j > 0 - )) + outrows.append( + tuple(conv(j, col) for j, col in enumerate(row) if j > 0) + ) except BadValueException: pass @@ -616,9 +606,7 @@ class Porter(object): return headers, [r for r in rows if r[ts_ind] < yesterday] - headers, rows = yield self.sqlite_store.runInteraction( - "select", r, - ) + headers, rows = yield self.sqlite_store.runInteraction("select", r) rows = self._convert_rows("sent_transactions", headers, rows) @@ -639,7 +627,7 @@ class Porter(object): txn.execute( "SELECT rowid FROM sent_transactions WHERE ts >= ?" 
" ORDER BY rowid ASC LIMIT 1", - (yesterday,) + (yesterday,), ) rows = txn.fetchall() @@ -657,21 +645,17 @@ class Porter(object): "table_name": "sent_transactions", "forward_rowid": next_chunk, "backward_rowid": 0, - } + }, ) def get_sent_table_size(txn): txn.execute( - "SELECT count(*) FROM sent_transactions" - " WHERE ts >= ?", - (yesterday,) + "SELECT count(*) FROM sent_transactions" " WHERE ts >= ?", (yesterday,) ) size, = txn.fetchone() return int(size) - remaining_count = yield self.sqlite_store.execute( - get_sent_table_size - ) + remaining_count = yield self.sqlite_store.execute(get_sent_table_size) total_count = remaining_count + inserted_rows @@ -680,13 +664,11 @@ class Porter(object): @defer.inlineCallbacks def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk): frows = yield self.sqlite_store.execute_sql( - "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,), - forward_chunk, + "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,), forward_chunk ) brows = yield self.sqlite_store.execute_sql( - "SELECT count(*) FROM %s WHERE rowid <= ?" % (table,), - backward_chunk, + "SELECT count(*) FROM %s WHERE rowid <= ?" % (table,), backward_chunk ) defer.returnValue(frows[0][0] + brows[0][0]) @@ -694,7 +676,7 @@ class Porter(object): @defer.inlineCallbacks def _get_already_ported_count(self, table): rows = yield self.postgres_store.execute_sql( - "SELECT count(*) FROM %s" % (table,), + "SELECT count(*) FROM %s" % (table,) ) defer.returnValue(rows[0][0]) @@ -717,22 +699,21 @@ class Porter(object): def _setup_state_group_id_seq(self): def r(txn): txn.execute("SELECT MAX(id) FROM state_groups") - next_id = txn.fetchone()[0]+1 - txn.execute( - "ALTER SEQUENCE state_group_id_seq RESTART WITH %s", - (next_id,), - ) + next_id = txn.fetchone()[0] + 1 + txn.execute("ALTER SEQUENCE state_group_id_seq RESTART WITH %s", (next_id,)) + return self.postgres_store.runInteraction("setup_state_group_id_seq", r) ############################################## -###### The following is simply UI stuff ###### +# The following is simply UI stuff ############################################## class Progress(object): """Used to report progress of the port """ + def __init__(self): self.tables = {} @@ -758,6 +739,7 @@ class Progress(object): class CursesProgress(Progress): """Reports progress to a curses window """ + def __init__(self, stdscr): self.stdscr = stdscr @@ -801,7 +783,7 @@ class CursesProgress(Progress): duration = int(now) - int(self.start_time) minutes, seconds = divmod(duration, 60) - duration_str = '%02dm %02ds' % (minutes, seconds,) + duration_str = '%02dm %02ds' % (minutes, seconds) if self.finished: status = "Time spent: %s (Done!)" % (duration_str,) @@ -814,16 +796,12 @@ class CursesProgress(Progress): est_remaining_str = '%02dm %02ds remaining' % divmod(est_remaining, 60) else: est_remaining_str = "Unknown" - status = ( - "Time spent: %s (est. remaining: %s)" - % (duration_str, est_remaining_str,) + status = "Time spent: %s (est. 
remaining: %s)" % ( + duration_str, + est_remaining_str, ) - self.stdscr.addstr( - 0, 0, - status, - curses.A_BOLD, - ) + self.stdscr.addstr(0, 0, status, curses.A_BOLD) max_len = max([len(t) for t in self.tables.keys()]) @@ -831,9 +809,7 @@ class CursesProgress(Progress): middle_space = 1 items = self.tables.items() - items.sort( - key=lambda i: (i[1]["perc"], i[0]), - ) + items.sort(key=lambda i: (i[1]["perc"], i[0])) for i, (table, data) in enumerate(items): if i + 2 >= rows: @@ -844,9 +820,7 @@ class CursesProgress(Progress): color = curses.color_pair(2) if perc == 100 else curses.color_pair(1) self.stdscr.addstr( - i + 2, left_margin + max_len - len(table), - table, - curses.A_BOLD | color, + i + 2, left_margin + max_len - len(table), table, curses.A_BOLD | color ) size = 20 @@ -857,15 +831,13 @@ class CursesProgress(Progress): ) self.stdscr.addstr( - i + 2, left_margin + max_len + middle_space, + i + 2, + left_margin + max_len + middle_space, "%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]), ) if self.finished: - self.stdscr.addstr( - rows - 1, 0, - "Press any key to exit...", - ) + self.stdscr.addstr(rows - 1, 0, "Press any key to exit...") self.stdscr.refresh() self.last_update = time.time() @@ -877,29 +849,25 @@ class CursesProgress(Progress): def set_state(self, state): self.stdscr.clear() - self.stdscr.addstr( - 0, 0, - state + "...", - curses.A_BOLD, - ) + self.stdscr.addstr(0, 0, state + "...", curses.A_BOLD) self.stdscr.refresh() class TerminalProgress(Progress): """Just prints progress to the terminal """ + def update(self, table, num_done): super(TerminalProgress, self).update(table, num_done) data = self.tables[table] - print "%s: %d%% (%d/%d)" % ( - table, data["perc"], - data["num_done"], data["total"], + print( + "%s: %d%% (%d/%d)" % (table, data["perc"], data["num_done"], data["total"]) ) def set_state(self, state): - print state + "..." + print(state + "...") ############################################## @@ -909,34 +877,38 @@ class TerminalProgress(Progress): if __name__ == "__main__": parser = argparse.ArgumentParser( description="A script to port an existing synapse SQLite database to" - " a new PostgreSQL database." + " a new PostgreSQL database." ) parser.add_argument("-v", action='store_true') parser.add_argument( - "--sqlite-database", required=True, + "--sqlite-database", + required=True, help="The snapshot of the SQLite database file. 
This must not be" - " currently used by a running synapse server" + " currently used by a running synapse server", ) parser.add_argument( - "--postgres-config", type=argparse.FileType('r'), required=True, - help="The database config file for the PostgreSQL database" + "--postgres-config", + type=argparse.FileType('r'), + required=True, + help="The database config file for the PostgreSQL database", ) parser.add_argument( - "--curses", action='store_true', - help="display a curses based progress UI" + "--curses", action='store_true', help="display a curses based progress UI" ) parser.add_argument( - "--batch-size", type=int, default=1000, + "--batch-size", + type=int, + default=1000, help="The number of rows to select from the SQLite table each" - " iteration [default=1000]", + " iteration [default=1000]", ) args = parser.parse_args() logging_config = { "level": logging.DEBUG if args.v else logging.INFO, - "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s" + "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s", } if args.curses: diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index e3f0d99a3f..84f8fe2d9e 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -568,7 +568,7 @@ def run(hs): clock.call_later(5 * 60, start_phone_stats_home) if hs.config.daemonize and hs.config.print_pidfile: - print (hs.config.pid_file) + print(hs.config.pid_file) _base.start_reactor( "synapse-homeserver", diff --git a/synapse/config/__main__.py b/synapse/config/__main__.py index 8fccf573ee..79fe9c3dac 100644 --- a/synapse/config/__main__.py +++ b/synapse/config/__main__.py @@ -28,7 +28,7 @@ if __name__ == "__main__": sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) - print (getattr(config, key)) + print(getattr(config, key)) sys.exit(0) else: sys.stderr.write("Unknown command %r\n" % (action,)) diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 3d2e90dd5b..14dae65ea0 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -106,10 +106,7 @@ class Config(object): @classmethod def check_file(cls, file_path, config_name): if file_path is None: - raise ConfigError( - "Missing config for %s." - % (config_name,) - ) + raise ConfigError("Missing config for %s." 
% (config_name,)) try: os.stat(file_path) except OSError as e: @@ -128,9 +125,7 @@ class Config(object): if e.errno != errno.EEXIST: raise if not os.path.isdir(dir_path): - raise ConfigError( - "%s is not a directory" % (dir_path,) - ) + raise ConfigError("%s is not a directory" % (dir_path,)) return dir_path @classmethod @@ -156,21 +151,20 @@ class Config(object): return results def generate_config( - self, - config_dir_path, - server_name, - is_generating_file, - report_stats=None, + self, config_dir_path, server_name, is_generating_file, report_stats=None ): default_config = "# vim:ft=yaml\n" - default_config += "\n\n".join(dedent(conf) for conf in self.invoke_all( - "default_config", - config_dir_path=config_dir_path, - server_name=server_name, - is_generating_file=is_generating_file, - report_stats=report_stats, - )) + default_config += "\n\n".join( + dedent(conf) + for conf in self.invoke_all( + "default_config", + config_dir_path=config_dir_path, + server_name=server_name, + is_generating_file=is_generating_file, + report_stats=report_stats, + ) + ) config = yaml.load(default_config) @@ -178,23 +172,22 @@ class Config(object): @classmethod def load_config(cls, description, argv): - config_parser = argparse.ArgumentParser( - description=description, - ) + config_parser = argparse.ArgumentParser(description=description) config_parser.add_argument( - "-c", "--config-path", + "-c", + "--config-path", action="append", metavar="CONFIG_FILE", help="Specify config file. Can be given multiple times and" - " may specify directories containing *.yaml files." + " may specify directories containing *.yaml files.", ) config_parser.add_argument( "--keys-directory", metavar="DIRECTORY", help="Where files such as certs and signing keys are stored when" - " their location is given explicitly in the config." - " Defaults to the directory containing the last config file", + " their location is given explicitly in the config." + " Defaults to the directory containing the last config file", ) config_args = config_parser.parse_args(argv) @@ -203,9 +196,7 @@ class Config(object): obj = cls() obj.read_config_files( - config_files, - keys_directory=config_args.keys_directory, - generate_keys=False, + config_files, keys_directory=config_args.keys_directory, generate_keys=False ) return obj @@ -213,38 +204,38 @@ class Config(object): def load_or_generate_config(cls, description, argv): config_parser = argparse.ArgumentParser(add_help=False) config_parser.add_argument( - "-c", "--config-path", + "-c", + "--config-path", action="append", metavar="CONFIG_FILE", help="Specify config file. Can be given multiple times and" - " may specify directories containing *.yaml files." + " may specify directories containing *.yaml files.", ) config_parser.add_argument( "--generate-config", action="store_true", - help="Generate a config file for the server name" + help="Generate a config file for the server name", ) config_parser.add_argument( "--report-stats", action="store", help="Whether the generated config reports anonymized usage statistics", - choices=["yes", "no"] + choices=["yes", "no"], ) config_parser.add_argument( "--generate-keys", action="store_true", - help="Generate any missing key files then exit" + help="Generate any missing key files then exit", ) config_parser.add_argument( "--keys-directory", metavar="DIRECTORY", help="Used with 'generate-*' options to specify where files such as" - " certs and signing keys should be stored in, unless explicitly" - " specified in the config." 
+ " certs and signing keys should be stored in, unless explicitly" + " specified in the config.", ) config_parser.add_argument( - "-H", "--server-name", - help="The server name to generate a config file for" + "-H", "--server-name", help="The server name to generate a config file for" ) config_args, remaining_args = config_parser.parse_known_args(argv) @@ -257,8 +248,8 @@ class Config(object): if config_args.generate_config: if config_args.report_stats is None: config_parser.error( - "Please specify either --report-stats=yes or --report-stats=no\n\n" + - MISSING_REPORT_STATS_SPIEL + "Please specify either --report-stats=yes or --report-stats=no\n\n" + + MISSING_REPORT_STATS_SPIEL ) if not config_files: config_parser.error( @@ -287,26 +278,32 @@ class Config(object): config_dir_path=config_dir_path, server_name=server_name, report_stats=(config_args.report_stats == "yes"), - is_generating_file=True + is_generating_file=True, ) obj.invoke_all("generate_files", config) config_file.write(config_str) - print(( - "A config file has been generated in %r for server name" - " %r with corresponding SSL keys and self-signed" - " certificates. Please review this file and customise it" - " to your needs." - ) % (config_path, server_name)) + print( + ( + "A config file has been generated in %r for server name" + " %r with corresponding SSL keys and self-signed" + " certificates. Please review this file and customise it" + " to your needs." + ) + % (config_path, server_name) + ) print( "If this server name is incorrect, you will need to" " regenerate the SSL certificates" ) return else: - print(( - "Config file %r already exists. Generating any missing key" - " files." - ) % (config_path,)) + print( + ( + "Config file %r already exists. Generating any missing key" + " files." + ) + % (config_path,) + ) generate_keys = True parser = argparse.ArgumentParser( @@ -338,8 +335,7 @@ class Config(object): return obj - def read_config_files(self, config_files, keys_directory=None, - generate_keys=False): + def read_config_files(self, config_files, keys_directory=None, generate_keys=False): if not keys_directory: keys_directory = os.path.dirname(config_files[-1]) @@ -364,8 +360,9 @@ class Config(object): if "report_stats" not in config: raise ConfigError( - MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" + - MISSING_REPORT_STATS_SPIEL + MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + + "\n" + + MISSING_REPORT_STATS_SPIEL ) if generate_keys: @@ -399,16 +396,16 @@ def find_config_files(search_paths): for entry in os.listdir(config_path): entry_path = os.path.join(config_path, entry) if not os.path.isfile(entry_path): - print ( - "Found subdirectory in config directory: %r. IGNORING." - ) % (entry_path, ) + err = "Found subdirectory in config directory: %r. IGNORING." + print(err % (entry_path,)) continue if not entry.endswith(".yaml"): - print ( - "Found file in config directory that does not" - " end in '.yaml': %r. IGNORING." - ) % (entry_path, ) + err = ( + "Found file in config directory that does not end in " + "'.yaml': %r. IGNORING." 
+ ) + print(err % (entry_path,)) continue files.append(entry_path) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index be61147b9b..d9d0255d0b 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -18,7 +18,7 @@ import threading import time from six import PY2, iteritems, iterkeys, itervalues -from six.moves import intern, range +from six.moves import builtins, intern, range from canonicaljson import json from prometheus_client import Histogram @@ -1233,7 +1233,7 @@ def db_to_json(db_content): # psycopg2 on Python 2 returns buffer objects, which we need to cast to # bytes to decode - if PY2 and isinstance(db_content, buffer): + if PY2 and isinstance(db_content, builtins.buffer): db_content = bytes(db_content) # Decode it to a Unicode string before feeding it to json.loads, so we diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index a1331c1a61..8af17921e3 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -32,7 +32,7 @@ logger = logging.getLogger(__name__) # py2 sqlite has buffer hardcoded as only binary type, so we must use it, # despite being deprecated and removed in favor of memoryview if six.PY2: - db_binary_type = buffer + db_binary_type = six.moves.builtins.buffer else: db_binary_type = memoryview diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index c7987bfcdd..2743b52bad 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -29,7 +29,7 @@ from ._base import SQLBaseStore logger = logging.getLogger(__name__) if six.PY2: - db_binary_type = buffer + db_binary_type = six.moves.builtins.buffer else: db_binary_type = memoryview diff --git a/synapse/storage/signatures.py b/synapse/storage/signatures.py index 5623391f6e..158e9dbe7b 100644 --- a/synapse/storage/signatures.py +++ b/synapse/storage/signatures.py @@ -27,7 +27,7 @@ from ._base import SQLBaseStore # py2 sqlite has buffer hardcoded as only binary type, so we must use it, # despite being deprecated and removed in favor of memoryview if six.PY2: - db_binary_type = buffer + db_binary_type = six.moves.builtins.buffer else: db_binary_type = memoryview diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index a3032cdce9..d8bf953ec0 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -30,7 +30,7 @@ from ._base import SQLBaseStore, db_to_json # py2 sqlite has buffer hardcoded as only binary type, so we must use it, # despite being deprecated and removed in favor of memoryview if six.PY2: - db_binary_type = buffer + db_binary_type = six.moves.builtins.buffer else: db_binary_type = memoryview diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index f2bde74dc5..625aedc940 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -15,6 +15,8 @@ import logging +from six import integer_types + from sortedcontainers import SortedDict from synapse.util import caches @@ -47,7 +49,7 @@ class StreamChangeCache(object): def has_entity_changed(self, entity, stream_pos): """Returns True if the entity may have been updated since stream_pos """ - assert type(stream_pos) is int or type(stream_pos) is long + assert type(stream_pos) in integer_types if stream_pos < self._earliest_known_stream_pos: self.metrics.inc_misses() diff --git a/synctl b/synctl index bb8cb084cc..7e79b05c39 100755 --- a/synctl +++ b/synctl @@ -76,8 +76,7 @@ def start(configfile): try: subprocess.check_call(args) - 
write("started synapse.app.homeserver(%r)" % - (configfile,), colour=GREEN) + write("started synapse.app.homeserver(%r)" % (configfile,), colour=GREEN) except subprocess.CalledProcessError as e: write( "error starting (exit code: %d); see above for logs" % e.returncode, @@ -86,21 +85,15 @@ def start(configfile): def start_worker(app, configfile, worker_configfile): - args = [ - sys.executable, "-B", - "-m", app, - "-c", configfile, - "-c", worker_configfile - ] + args = [sys.executable, "-B", "-m", app, "-c", configfile, "-c", worker_configfile] try: subprocess.check_call(args) write("started %s(%r)" % (app, worker_configfile), colour=GREEN) except subprocess.CalledProcessError as e: write( - "error starting %s(%r) (exit code: %d); see above for logs" % ( - app, worker_configfile, e.returncode, - ), + "error starting %s(%r) (exit code: %d); see above for logs" + % (app, worker_configfile, e.returncode), colour=RED, ) @@ -120,9 +113,9 @@ def stop(pidfile, app): abort("Cannot stop %s: Unknown error" % (app,)) -Worker = collections.namedtuple("Worker", [ - "app", "configfile", "pidfile", "cache_factor", "cache_factors", -]) +Worker = collections.namedtuple( + "Worker", ["app", "configfile", "pidfile", "cache_factor", "cache_factors"] +) def main(): @@ -141,24 +134,20 @@ def main(): help="the homeserver config file, defaults to homeserver.yaml", ) parser.add_argument( - "-w", "--worker", - metavar="WORKERCONFIG", - help="start or stop a single worker", + "-w", "--worker", metavar="WORKERCONFIG", help="start or stop a single worker" ) parser.add_argument( - "-a", "--all-processes", + "-a", + "--all-processes", metavar="WORKERCONFIGDIR", help="start or stop all the workers in the given directory" - " and the main synapse process", + " and the main synapse process", ) options = parser.parse_args() if options.worker and options.all_processes: - write( - 'Cannot use "--worker" with "--all-processes"', - stream=sys.stderr - ) + write('Cannot use "--worker" with "--all-processes"', stream=sys.stderr) sys.exit(1) configfile = options.configfile @@ -167,9 +156,7 @@ def main(): write( "No config file found\n" "To generate a config file, run '%s -c %s --generate-config" - " --server-name='\n" % ( - " ".join(SYNAPSE), options.configfile - ), + " --server-name='\n" % (" ".join(SYNAPSE), options.configfile), stream=sys.stderr, ) sys.exit(1) @@ -194,8 +181,7 @@ def main(): worker_configfile = options.worker if not os.path.exists(worker_configfile): write( - "No worker config found at %r" % (worker_configfile,), - stream=sys.stderr, + "No worker config found at %r" % (worker_configfile,), stream=sys.stderr ) sys.exit(1) worker_configfiles.append(worker_configfile) @@ -211,9 +197,9 @@ def main(): stream=sys.stderr, ) sys.exit(1) - worker_configfiles.extend(sorted(glob.glob( - os.path.join(worker_configdir, "*.yaml") - ))) + worker_configfiles.extend( + sorted(glob.glob(os.path.join(worker_configdir, "*.yaml"))) + ) workers = [] for worker_configfile in worker_configfiles: @@ -223,14 +209,12 @@ def main(): if worker_app == "synapse.app.homeserver": # We need to special case all of this to pick up options that may # be set in the main config file or in this worker config file. 
- worker_pidfile = ( - worker_config.get("pid_file") - or pidfile + worker_pidfile = worker_config.get("pid_file") or pidfile + worker_cache_factor = ( + worker_config.get("synctl_cache_factor") or cache_factor ) - worker_cache_factor = worker_config.get("synctl_cache_factor") or cache_factor worker_cache_factors = ( - worker_config.get("synctl_cache_factors") - or cache_factors + worker_config.get("synctl_cache_factors") or cache_factors ) daemonize = worker_config.get("daemonize") or config.get("daemonize") assert daemonize, "Main process must have daemonize set to true" @@ -239,19 +223,27 @@ def main(): for key in worker_config: if key == "worker_app": # But we allow worker_app continue - assert not key.startswith("worker_"), \ - "Main process cannot use worker_* config" + assert not key.startswith( + "worker_" + ), "Main process cannot use worker_* config" else: worker_pidfile = worker_config["worker_pid_file"] worker_daemonize = worker_config["worker_daemonize"] assert worker_daemonize, "In config %r: expected '%s' to be True" % ( - worker_configfile, "worker_daemonize") + worker_configfile, + "worker_daemonize", + ) worker_cache_factor = worker_config.get("synctl_cache_factor") worker_cache_factors = worker_config.get("synctl_cache_factors", {}) - workers.append(Worker( - worker_app, worker_configfile, worker_pidfile, worker_cache_factor, - worker_cache_factors, - )) + workers.append( + Worker( + worker_app, + worker_configfile, + worker_pidfile, + worker_cache_factor, + worker_cache_factors, + ) + ) action = options.action diff --git a/tox.ini b/tox.ini index 87b5e4782d..04d2f721bf 100644 --- a/tox.ini +++ b/tox.ini @@ -108,10 +108,10 @@ commands = [testenv:pep8] skip_install = True -basepython = python2.7 +basepython = python3.6 deps = flake8 -commands = /bin/sh -c "flake8 synapse tests {env:PEP8SUFFIX:}" +commands = /bin/sh -c "flake8 synapse tests scripts scripts-dev scripts/register_new_matrix_user scripts/synapse_port_db synctl {env:PEP8SUFFIX:}" [testenv:check_isort] skip_install = True -- cgit 1.4.1 From a6f421e8120c74fd9948e6958d794b6455e2ac85 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Sat, 20 Oct 2018 02:13:01 +0100 Subject: Run MAU queries as background processes Fixes #3820 --- changelog.d/4076.misc | 1 + synapse/app/homeserver.py | 30 ++++++++++++++++++++++-------- 2 files changed, 23 insertions(+), 8 deletions(-) create mode 100644 changelog.d/4076.misc diff --git a/changelog.d/4076.misc b/changelog.d/4076.misc new file mode 100644 index 0000000000..9dd000decf --- /dev/null +++ b/changelog.d/4076.misc @@ -0,0 +1 @@ +Correctly manage logcontexts during startup to fix some "Unexpected logging context" warnings \ No newline at end of file diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index e3f0d99a3f..73ab267e8a 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -530,10 +530,13 @@ def run(hs): clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000) # monthly active user limiting functionality - clock.looping_call( - hs.get_datastore().reap_monthly_active_users, 1000 * 60 * 60 - ) - hs.get_datastore().reap_monthly_active_users() + def reap_monthly_active_users(): + return run_as_background_process( + "reap_monthly_active_users", + hs.get_datastore().reap_monthly_active_users, + ) + clock.looping_call(reap_monthly_active_users, 1000 * 60 * 60) + reap_monthly_active_users() @defer.inlineCallbacks def generate_monthly_active_users(): @@ -547,12 +550,23 @@ def run(hs): 
registered_reserved_users_mau_gauge.set(float(reserved_count)) max_mau_gauge.set(float(hs.config.max_mau_value)) - hs.get_datastore().initialise_reserved_users( - hs.config.mau_limits_reserved_threepids + def start_generate_monthly_active_users(): + return run_as_background_process( + "generate_monthly_active_users", + generate_monthly_active_users, + ) + + # XXX is this really supposed to be a background process? it looks + # like it needs to complete before some of the other stuff runs. + run_as_background_process( + "initialise_reserved_users", + hs.get_datastore().initialise_reserved_users, + hs.config.mau_limits_reserved_threepids, ) - generate_monthly_active_users() + + start_generate_monthly_active_users() if hs.config.limit_usage_by_mau: - clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000) + clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000) # End of monthly active user settings if hs.config.report_stats: -- cgit 1.4.1 From 058934b1cf90c1f25af19f7f9a7bf8ad17a50340 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 21 Oct 2018 12:18:30 +0100 Subject: uh, Matrix is called Matrix these days... --- setup.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index b00c2af367..00b69c43f5 100755 --- a/setup.py +++ b/setup.py @@ -1,6 +1,8 @@ #!/usr/bin/env python -# Copyright 2014-2016 OpenMarket Ltd +# Copyright 2014-2017 OpenMarket Ltd +# Copyright 2017 Vector Creations Ltd +# Copyright 2017-2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -86,7 +88,7 @@ setup( name="matrix-synapse", version=version, packages=find_packages(exclude=["tests", "tests.*"]), - description="Reference Synapse Home Server", + description="Reference homeserver for the Matrix decentralised comms protocol", install_requires=dependencies['requirements'](include_conditional=True).keys(), dependency_links=dependencies["DEPENDENCY_LINKS"].values(), include_package_data=True, -- cgit 1.4.1 From 3e8b02c9393684f2c96e221ced804a0d20de2da0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 22 Oct 2018 16:12:11 +0100 Subject: Rename _refresh_pusher This is public (or at least, called from outside the class), so ought to have a better name. 
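The homeserver.py hunk above is an instance of an idiom this series leans on: wrap periodic work in run_as_background_process with a descriptive name, so its CPU and database usage is attributed to a named metric rather than to whichever request happened to start it, and drive the wrapper from clock.looping_call. A minimal sketch of the idiom; the names and the hourly interval come from the diff, while the free-standing helper function is illustrative:

from synapse.metrics.background_process_metrics import run_as_background_process


def schedule_mau_reaping(hs):
    # Illustrative helper (not in the diff): schedules the reaper the
    # same way run() does in the homeserver.py hunk above.
    def reap_monthly_active_users():
        # run_as_background_process gives the work its own logcontext,
        # so its resource usage is reported under this name.
        return run_as_background_process(
            "reap_monthly_active_users",
            hs.get_datastore().reap_monthly_active_users,
        )

    # Run once an hour, and once immediately at startup.
    hs.get_clock().looping_call(reap_monthly_active_users, 1000 * 60 * 60)
    reap_monthly_active_users()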
--- synapse/app/pusher.py | 2 +- synapse/push/pusherpool.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 0f9f8e19f6..e06b70894e 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -183,7 +183,7 @@ class PusherReplicationHandler(ReplicationClientHandler): def start_pusher(self, user_id, app_id, pushkey): key = "%s:%s" % (app_id, pushkey) logger.info("Starting pusher %r / %r", user_id, key) - return self.pusher_pool._refresh_pusher(app_id, pushkey, user_id) + return self.pusher_pool.start_pusher_by_id(app_id, pushkey, user_id) def start(config_options): diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 9f7d5ef217..bdfe27d8c1 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -86,7 +86,7 @@ class PusherPool: last_stream_ordering=last_stream_ordering, profile_tag=profile_tag, ) - yield self._refresh_pusher(app_id, pushkey, user_id) + yield self.start_pusher_by_id(app_id, pushkey, user_id) @defer.inlineCallbacks def remove_pushers_by_app_id_and_pushkey_not_user(self, app_id, pushkey, @@ -190,7 +190,8 @@ class PusherPool: logger.exception("Exception in pusher on_new_receipts") @defer.inlineCallbacks - def _refresh_pusher(self, app_id, pushkey, user_id): + def start_pusher_by_id(self, app_id, pushkey, user_id): + """Look up the details for the given pusher, and start it""" resultlist = yield self.store.get_pushers_by_app_id_and_pushkey( app_id, pushkey ) -- cgit 1.4.1 From 04277d0ed8ba456aa6c2d5dca978013c295e7ace Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 22 Oct 2018 16:12:11 +0100 Subject: Factor PusherPool._start_pusher out of _start_pushers ... and use it from start_pusher_by_id. This mostly simplifies start_pusher_by_id. 
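The pusher refactors below all converge on a guarded background-loop idiom: a synchronous _start_processing that kicks off at most one background process per pusher, and a _process loop that re-reads the high-water mark after each pass so that wake-ups arriving mid-run are never lost. A condensed sketch of the idiom, abstracted from the HttpPusher/EmailPusher diffs below (the class name is illustrative; per-pass error handling is trimmed):

from twisted.internet import defer

from synapse.metrics.background_process_metrics import run_as_background_process


class PusherLoop(object):
    def __init__(self):
        self.processing = False
        self.max_stream_ordering = 0

    def on_new_notifications(self, min_stream_ordering, max_stream_ordering):
        # Synchronous: bump the high-water mark and poke the loop.
        self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering)
        self._start_processing()

    def _start_processing(self):
        if self.processing:
            # An existing loop will spot the new max_stream_ordering.
            return
        run_as_background_process("pusher.process", self._process)

    @defer.inlineCallbacks
    def _process(self):
        try:
            self.processing = True
            # If the mark moved while we were sending, go round again
            # until we have caught up.
            while True:
                starting_max = self.max_stream_ordering
                yield self._unsafe_process()
                if self.max_stream_ordering == starting_max:
                    break
        finally:
            self.processing = False

    def _unsafe_process(self):
        # A real pusher sends out the queued notifications here.
        return defer.succeed(None)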
--- synapse/push/pusherpool.py | 51 ++++++++++++++++++++++++++++++---------------- 1 file changed, 33 insertions(+), 18 deletions(-) diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index bdfe27d8c1..a5c133aa33 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -192,6 +192,9 @@ class PusherPool: @defer.inlineCallbacks def start_pusher_by_id(self, app_id, pushkey, user_id): """Look up the details for the given pusher, and start it""" + if not self._start_pushers: + return + resultlist = yield self.store.get_pushers_by_app_id_and_pushkey( app_id, pushkey ) @@ -202,8 +205,7 @@ class PusherPool: p = r if p: - - self._start_pushers([p]) + self._start_pusher(p) def _start_pushers(self, pushers): if not self.start_pushers: @@ -211,24 +213,37 @@ class PusherPool: return logger.info("Starting %d pushers", len(pushers)) for pusherdict in pushers: - try: - p = self.pusher_factory.create_pusher(pusherdict) - except Exception: - logger.exception("Couldn't start a pusher: caught Exception") - continue - if p: - appid_pushkey = "%s:%s" % ( - pusherdict['app_id'], - pusherdict['pushkey'], - ) - byuser = self.pushers.setdefault(pusherdict['user_name'], {}) + self._start_pusher(pusherdict) + logger.info("Started pushers") - if appid_pushkey in byuser: - byuser[appid_pushkey].on_stop() - byuser[appid_pushkey] = p - run_in_background(p.on_started) + def _start_pusher(self, pusherdict): + """Start the given pusher - logger.info("Started pushers") + Args: + pusherdict (dict): + + Returns: + None + """ + try: + p = self.pusher_factory.create_pusher(pusherdict) + except Exception: + logger.exception("Couldn't start a pusher: caught Exception") + return + + if not p: + return + + appid_pushkey = "%s:%s" % ( + pusherdict['app_id'], + pusherdict['pushkey'], + ) + byuser = self.pushers.setdefault(pusherdict['user_name'], {}) + + if appid_pushkey in byuser: + byuser[appid_pushkey].on_stop() + byuser[appid_pushkey] = p + run_in_background(p.on_started) @defer.inlineCallbacks def remove_pusher(self, app_id, pushkey, user_id): -- cgit 1.4.1 From 5110f4e4258564615edfca6112ab6a36eb551672 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 22 Oct 2018 16:12:11 +0100 Subject: move get_all_pushers call down simplifies the interface to _start_pushers --- synapse/push/pusherpool.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index a5c133aa33..b9b68ec829 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -36,8 +36,7 @@ class PusherPool: @defer.inlineCallbacks def start(self): - pushers = yield self.store.get_all_pushers() - self._start_pushers(pushers) + yield self._start_pushers() @defer.inlineCallbacks def add_pusher(self, user_id, access_token, kind, app_id, @@ -207,10 +206,17 @@ class PusherPool: if p: self._start_pusher(p) - def _start_pushers(self, pushers): + @defer.inlineCallbacks + def _start_pushers(self): + """Start all the pushers + + Returns: + Deferred + """ if not self.start_pushers: logger.info("Not starting pushers because they are disabled in the config") return + pushers = yield self.store.get_all_pushers() logger.info("Starting %d pushers", len(pushers)) for pusherdict in pushers: self._start_pusher(pusherdict) -- cgit 1.4.1 From c7273c11bc73630e3f2ab1a82d20a40d8b17f9a3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 22 Oct 2018 16:12:11 +0100 Subject: Give pushers their own background logcontext Each pusher has its own loop 
which runs for as long as it has work to do. This should run in its own background thread with its own logcontext, as other similar loops elsewhere in the system do - which means that CPU usage is consistently attributed to that loop, rather than to whatever request happened to start the loop. --- synapse/push/emailpusher.py | 48 +++++++++++++++++----------------- synapse/push/httppusher.py | 64 +++++++++++++++++++++------------------------ 2 files changed, 54 insertions(+), 58 deletions(-) diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index d746371420..0c9c0201e8 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -18,8 +18,7 @@ import logging from twisted.internet import defer from twisted.internet.error import AlreadyCalled, AlreadyCancelled -from synapse.util.logcontext import LoggingContext -from synapse.util.metrics import Measure +from synapse.metrics.background_process_metrics import run_as_background_process logger = logging.getLogger(__name__) @@ -80,7 +79,7 @@ class EmailPusher(object): self.throttle_params = yield self.store.get_throttle_params_by_room( self.pusher_id ) - yield self._process() + self._start_processing() except Exception: logger.exception("Error starting email pusher") @@ -92,10 +91,10 @@ class EmailPusher(object): pass self.timed_call = None - @defer.inlineCallbacks def on_new_notifications(self, min_stream_ordering, max_stream_ordering): self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering) - yield self._process() + self._start_processing() + return defer.succeed(None) def on_new_receipts(self, min_stream_id, max_stream_id): # We could wake up and cancel the timer but there tend to be quite a @@ -103,32 +102,33 @@ class EmailPusher(object): # timer fire return defer.succeed(None) - @defer.inlineCallbacks def on_timer(self): self.timed_call = None - yield self._process() + self._start_processing() - @defer.inlineCallbacks - def _process(self): + def _start_processing(self): if self.processing: return - with LoggingContext("emailpush._process"): - with Measure(self.clock, "emailpush._process"): + run_as_background_process("emailpush.process", self._process) + + @defer.inlineCallbacks + def _process(self): + try: + self.processing = True + + # if the max ordering changes while we're running _unsafe_process, + # call it again, and so on until we've caught up. + while True: + starting_max_ordering = self.max_stream_ordering try: - self.processing = True - # if the max ordering changes while we're running _unsafe_process, - # call it again, and so on until we've caught up. 
- while True: - starting_max_ordering = self.max_stream_ordering - try: - yield self._unsafe_process() - except Exception: - logger.exception("Exception processing notifs") - if self.max_stream_ordering == starting_max_ordering: - break - finally: - self.processing = False + yield self._unsafe_process() + except Exception: + logger.exception("Exception processing notifs") + if self.max_stream_ordering == starting_max_ordering: + break + finally: + self.processing = False @defer.inlineCallbacks def _unsafe_process(self): diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 48abd5e4d6..5f6b21bc67 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -22,9 +22,8 @@ from prometheus_client import Counter from twisted.internet import defer from twisted.internet.error import AlreadyCalled, AlreadyCancelled +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.push import PusherConfigException -from synapse.util.logcontext import LoggingContext -from synapse.util.metrics import Measure from . import push_rule_evaluator, push_tools @@ -92,34 +91,30 @@ class HttpPusher(object): self.data_minus_url.update(self.data) del self.data_minus_url['url'] - @defer.inlineCallbacks def on_started(self): - try: - yield self._process() - except Exception: - logger.exception("Error starting http pusher") + self._start_processing() + return defer.succeed(None) - @defer.inlineCallbacks def on_new_notifications(self, min_stream_ordering, max_stream_ordering): self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering or 0) - yield self._process() + self._start_processing() + return defer.suceed(None) - @defer.inlineCallbacks def on_new_receipts(self, min_stream_id, max_stream_id): # Note that the min here shouldn't be relied upon to be accurate. # We could check the receipts are actually m.read receipts here, # but currently that's the only type of receipt anyway... - with LoggingContext("push.on_new_receipts"): - with Measure(self.clock, "push.on_new_receipts"): - badge = yield push_tools.get_badge_count( - self.hs.get_datastore(), self.user_id - ) - yield self._send_badge(badge) + run_as_background_process("http_pusher.on_new_receipts", self._update_badge) + return defer.succeed(None) @defer.inlineCallbacks + def _update_badge(self): + badge = yield push_tools.get_badge_count(self.hs.get_datastore(), self.user_id) + yield self._send_badge(badge) + def on_timer(self): - yield self._process() + self._start_processing() def on_stop(self): if self.timed_call: @@ -129,27 +124,28 @@ class HttpPusher(object): pass self.timed_call = None - @defer.inlineCallbacks - def _process(self): + def _start_processing(self): if self.processing: return - with LoggingContext("push._process"): - with Measure(self.clock, "push._process"): + run_as_background_process("httppush.process", self._process) + + @defer.inlineCallbacks + def _process(self): + try: + self.processing = True + # if the max ordering changes while we're running _unsafe_process, + # call it again, and so on until we've caught up. + while True: + starting_max_ordering = self.max_stream_ordering try: - self.processing = True - # if the max ordering changes while we're running _unsafe_process, - # call it again, and so on until we've caught up. 
- while True: - starting_max_ordering = self.max_stream_ordering - try: - yield self._unsafe_process() - except Exception: - logger.exception("Exception processing notifs") - if self.max_stream_ordering == starting_max_ordering: - break - finally: - self.processing = False + yield self._unsafe_process() + except Exception: + logger.exception("Exception processing notifs") + if self.max_stream_ordering == starting_max_ordering: + break + finally: + self.processing = False @defer.inlineCallbacks def _unsafe_process(self): -- cgit 1.4.1 From e7a16c6210191e9556fb1c11cbff2d98f0a5206c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 22 Oct 2018 16:12:11 +0100 Subject: Remove redundant run_as_background_process() from pusherpool `on_new_notifications` and `on_new_receipts` in `HttpPusher` and `EmailPusher` now always return synchronously, so we can remove the `defer.gatherResults` on their results, and the `run_as_background_process` wrappers can be removed too because the PusherPool methods will now complete quickly enough. --- synapse/app/pusher.py | 4 ++-- synapse/handlers/federation.py | 4 ++-- synapse/handlers/message.py | 2 +- synapse/handlers/receipts.py | 2 +- synapse/push/emailpusher.py | 3 +-- synapse/push/httppusher.py | 2 -- synapse/push/pusherpool.py | 47 +++++++----------------------------------- 7 files changed, 14 insertions(+), 50 deletions(-) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index e06b70894e..83b0863f00 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -161,11 +161,11 @@ class PusherReplicationHandler(ReplicationClientHandler): else: yield self.start_pusher(row.user_id, row.app_id, row.pushkey) elif stream_name == "events": - self.pusher_pool.on_new_notifications( + yield self.pusher_pool.on_new_notifications( token, token, ) elif stream_name == "receipts": - self.pusher_pool.on_new_receipts( + yield self.pusher_pool.on_new_receipts( token, token, set(row.room_id for row in rows) ) except Exception: diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index cab57a8849..63e495e3f8 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2520,7 +2520,7 @@ class FederationHandler(BaseHandler): if not backfilled: # Never notify for backfilled events for event, _ in event_and_contexts: - self._notify_persisted_event(event, max_stream_id) + yield self._notify_persisted_event(event, max_stream_id) def _notify_persisted_event(self, event, max_stream_id): """Checks to see if notifier/pushers should be notified about the @@ -2553,7 +2553,7 @@ class FederationHandler(BaseHandler): extra_users=extra_users ) - self.pusher_pool.on_new_notifications( + return self.pusher_pool.on_new_notifications( event_stream_id, max_stream_id, ) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 4954b23a0d..6c4fcfb10a 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -779,7 +779,7 @@ class EventCreationHandler(object): event, context=context ) - self.pusher_pool.on_new_notifications( + yield self.pusher_pool.on_new_notifications( event_stream_id, max_stream_id, ) diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index a6f3181f09..4c2690ba26 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -119,7 +119,7 @@ class ReceiptsHandler(BaseHandler): "receipt_key", max_batch_id, rooms=affected_room_ids ) # Note that the min here shouldn't be relied upon to be accurate. 
- self.hs.get_pusherpool().on_new_receipts( + yield self.hs.get_pusherpool().on_new_receipts( min_batch_id, max_batch_id, affected_room_ids, ) diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index 0c9c0201e8..d5a99b838c 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -94,13 +94,12 @@ class EmailPusher(object): def on_new_notifications(self, min_stream_ordering, max_stream_ordering): self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering) self._start_processing() - return defer.succeed(None) def on_new_receipts(self, min_stream_id, max_stream_id): # We could wake up and cancel the timer but there tend to be quite a # lot of read receipts so it's probably less work to just let the # timer fire - return defer.succeed(None) + pass def on_timer(self): self.timed_call = None diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 5f6b21bc67..770f55feae 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -98,7 +98,6 @@ class HttpPusher(object): def on_new_notifications(self, min_stream_ordering, max_stream_ordering): self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering or 0) self._start_processing() - return defer.suceed(None) def on_new_receipts(self, min_stream_id, max_stream_id): # Note that the min here shouldn't be relied upon to be accurate. @@ -106,7 +105,6 @@ class HttpPusher(object): # We could check the receipts are actually m.read receipts here, # but currently that's the only type of receipt anyway... run_as_background_process("http_pusher.on_new_receipts", self._update_badge) - return defer.succeed(None) @defer.inlineCallbacks def _update_badge(self): diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index b9b68ec829..a4d1ce3aad 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -18,9 +18,8 @@ import logging from twisted.internet import defer -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.push.pusher import PusherFactory -from synapse.util.logcontext import make_deferred_yieldable, run_in_background +from synapse.util.logcontext import run_in_background logger = logging.getLogger(__name__) @@ -122,45 +121,23 @@ class PusherPool: p['app_id'], p['pushkey'], p['user_name'], ) - def on_new_notifications(self, min_stream_id, max_stream_id): - run_as_background_process( - "on_new_notifications", - self._on_new_notifications, min_stream_id, max_stream_id, - ) - @defer.inlineCallbacks - def _on_new_notifications(self, min_stream_id, max_stream_id): + def on_new_notifications(self, min_stream_id, max_stream_id): try: users_affected = yield self.store.get_push_action_users_in_range( min_stream_id, max_stream_id ) - deferreds = [] - for u in users_affected: if u in self.pushers: for p in self.pushers[u].values(): - deferreds.append( - run_in_background( - p.on_new_notifications, - min_stream_id, max_stream_id, - ) - ) - - yield make_deferred_yieldable( - defer.gatherResults(deferreds, consumeErrors=True), - ) + p.on_new_notifications(min_stream_id, max_stream_id) + except Exception: logger.exception("Exception in pusher on_new_notifications") - def on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids): - run_as_background_process( - "on_new_receipts", - self._on_new_receipts, min_stream_id, max_stream_id, affected_room_ids, - ) - @defer.inlineCallbacks - def _on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids): + def 
on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids): try: # Need to subtract 1 from the minimum because the lower bound here # is not inclusive @@ -170,21 +147,11 @@ class PusherPool: # This returns a tuple, user_id is at index 3 users_affected = set([r[3] for r in updated_receipts]) - deferreds = [] - for u in users_affected: if u in self.pushers: for p in self.pushers[u].values(): - deferreds.append( - run_in_background( - p.on_new_receipts, - min_stream_id, max_stream_id, - ) - ) - - yield make_deferred_yieldable( - defer.gatherResults(deferreds, consumeErrors=True), - ) + p.on_new_receipts(min_stream_id, max_stream_id) + except Exception: logger.exception("Exception in pusher on_new_receipts") -- cgit 1.4.1 From f749607c911a82e64685f1cfaeee892c49a1b606 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 22 Oct 2018 16:12:11 +0100 Subject: Make on_started synchronous too This brings it into line with on_new_notifications and on_new_receipts. It requires a little bit of hoop-jumping in EmailPusher to load the throttle params before the first loop. --- synapse/push/emailpusher.py | 15 +++++++-------- synapse/push/httppusher.py | 1 - synapse/push/pusherpool.py | 16 ++++++++++++++-- 3 files changed, 21 insertions(+), 11 deletions(-) diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index d5a99b838c..0bc548ae7a 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -72,16 +72,9 @@ class EmailPusher(object): self.processing = False - @defer.inlineCallbacks def on_started(self): if self.mailer is not None: - try: - self.throttle_params = yield self.store.get_throttle_params_by_room( - self.pusher_id - ) - self._start_processing() - except Exception: - logger.exception("Error starting email pusher") + self._start_processing() def on_stop(self): if self.timed_call: @@ -116,6 +109,12 @@ class EmailPusher(object): try: self.processing = True + if self.throttle_params is None: + # this is our first loop: load up the throttle params + self.throttle_params = yield self.store.get_throttle_params_by_room( + self.pusher_id + ) + # if the max ordering changes while we're running _unsafe_process, # call it again, and so on until we've caught up. while True: diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 770f55feae..33034d44da 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -93,7 +93,6 @@ class HttpPusher(object): def on_started(self): self._start_processing() - return defer.succeed(None) def on_new_notifications(self, min_stream_ordering, max_stream_ordering): self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering or 0) diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index a4d1ce3aad..695e582dce 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -19,12 +19,24 @@ import logging from twisted.internet import defer from synapse.push.pusher import PusherFactory -from synapse.util.logcontext import run_in_background logger = logging.getLogger(__name__) class PusherPool: + """ + The pusher pool. This is responsible for dispatching notifications of new events to + the http and email pushers. + + It provides three methods which are designed to be called by the rest of the + application: `start`, `on_new_notifications`, and `on_new_receipts`: each of these + delegates to each of the relevant pushers. 
+ + Note that it is expected that each pusher will have its own 'processing' loop which + will send out the notifications in the background, rather than blocking until the + notifications are sent; accordingly Pusher.on_started, Pusher.on_new_notifications and + Pusher.on_new_receipts are not expected to return deferreds. + """ def __init__(self, _hs): self.hs = _hs self.pusher_factory = PusherFactory(_hs) @@ -216,7 +228,7 @@ class PusherPool: if appid_pushkey in byuser: byuser[appid_pushkey].on_stop() byuser[appid_pushkey] = p - run_in_background(p.on_started) + p.on_started() @defer.inlineCallbacks def remove_pusher(self, app_id, pushkey, user_id): -- cgit 1.4.1 From 026cd91ac878bc723f653f393fd1f44c438ba559 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 22 Oct 2018 16:12:11 +0100 Subject: Run PusherPool.start as a background process We don't do anything with the result, so this is needed to give this code a logcontext. --- synapse/push/pusherpool.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 695e582dce..d25877cc66 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -18,6 +18,7 @@ import logging from twisted.internet import defer +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.push.pusher import PusherFactory logger = logging.getLogger(__name__) @@ -45,9 +46,13 @@ class PusherPool: self.clock = self.hs.get_clock() self.pushers = {} - @defer.inlineCallbacks def start(self): - yield self._start_pushers() + """Starts the pushers off in a background process. + """ + if not self.start_pushers: + logger.info("Not starting pushers because they are disabled in the config") + return + run_as_background_process("start_pushers", self._start_pushers) @defer.inlineCallbacks def add_pusher(self, user_id, access_token, kind, app_id, @@ -192,9 +197,6 @@ class PusherPool: Returns: Deferred """ - if not self.start_pushers: - logger.info("Not starting pushers because they are disabled in the config") - return pushers = yield self.store.get_all_pushers() logger.info("Starting %d pushers", len(pushers)) for pusherdict in pushers: -- cgit 1.4.1 From abd9914683109d50f3998627eaa859caa500d63c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 22 Oct 2018 16:12:11 +0100 Subject: Changelog --- changelog.d/4075.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4075.misc diff --git a/changelog.d/4075.misc b/changelog.d/4075.misc new file mode 100644 index 0000000000..d08b8cc271 --- /dev/null +++ b/changelog.d/4075.misc @@ -0,0 +1 @@ +Clean up threading and logcontexts in pushers \ No newline at end of file -- cgit 1.4.1 From 6340141300ea9370651b9dbcc2574fff96f45d48 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 22 Oct 2018 16:17:27 +0100 Subject: README.rst: fix minor grammar --- README.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.rst b/README.rst index 209313ba37..9f27c14c4a 100644 --- a/README.rst +++ b/README.rst @@ -651,7 +651,7 @@ Using a reverse proxy with Synapse It is recommended to put a reverse proxy such as `nginx `_, -`Apache `_ or +`Apache `_, `Caddy `_ or `HAProxy `_ in front of Synapse. 
One advantage of doing so is that it means that you can expose the default https port (443) to @@ -683,7 +683,7 @@ so an example nginx configuration might look like:: } } -an example caddy configuration may look like:: +an example Caddy configuration might look like:: matrix.example.com { proxy /_matrix http://localhost:8008 { @@ -691,7 +691,7 @@ an example caddy configuration may look like:: } } -and an example apache configuration may look like:: +and an example Apache configuration might look like:: SSLEngine on -- cgit 1.4.1 From 5c445114d356c9c23b99f0a2c4246be983a38b69 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 23 Oct 2018 13:12:32 +0100 Subject: Correctly account for cpu usage by background threads (#4074) Wrap calls to deferToThread() in a thing which uses a child logcontext to attribute CPU usage to the right request. While we're in the area, remove the logcontext_tracer stuff, which is never used, and afaik doesn't work. Fixes #4064 --- changelog.d/4074.bugfix | 1 + synapse/handlers/auth.py | 18 +---- synapse/rest/media/v1/media_repository.py | 24 +++--- synapse/rest/media/v1/media_storage.py | 8 +- synapse/rest/media/v1/storage_provider.py | 6 +- synapse/util/logcontext.py | 120 +++++++++++++++++------------- 6 files changed, 97 insertions(+), 80 deletions(-) create mode 100644 changelog.d/4074.bugfix diff --git a/changelog.d/4074.bugfix b/changelog.d/4074.bugfix new file mode 100644 index 0000000000..b3b6b00243 --- /dev/null +++ b/changelog.d/4074.bugfix @@ -0,0 +1 @@ +Correctly account for cpu usage by background threads diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 2a5eab124f..329e3c7d71 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -22,7 +22,7 @@ import bcrypt import pymacaroons from canonicaljson import json -from twisted.internet import defer, threads +from twisted.internet import defer from twisted.web.client import PartialDownloadError import synapse.util.stringutils as stringutils @@ -37,8 +37,8 @@ from synapse.api.errors import ( ) from synapse.module_api import ModuleApi from synapse.types import UserID +from synapse.util import logcontext from synapse.util.caches.expiringcache import ExpiringCache -from synapse.util.logcontext import make_deferred_yieldable from ._base import BaseHandler @@ -884,11 +884,7 @@ class AuthHandler(BaseHandler): bcrypt.gensalt(self.bcrypt_rounds), ).decode('ascii') - return make_deferred_yieldable( - threads.deferToThreadPool( - self.hs.get_reactor(), self.hs.get_reactor().getThreadPool(), _do_hash - ), - ) + return logcontext.defer_to_thread(self.hs.get_reactor(), _do_hash) def validate_hash(self, password, stored_hash): """Validates that self.hash(password) == stored_hash. 
@@ -913,13 +909,7 @@ class AuthHandler(BaseHandler): if not isinstance(stored_hash, bytes): stored_hash = stored_hash.encode('ascii') - return make_deferred_yieldable( - threads.deferToThreadPool( - self.hs.get_reactor(), - self.hs.get_reactor().getThreadPool(), - _do_validate_hash, - ), - ) + return logcontext.defer_to_thread(self.hs.get_reactor(), _do_validate_hash) else: return defer.succeed(False) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index a828ff4438..08b1867fab 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -25,7 +25,7 @@ from six.moves.urllib import parse as urlparse import twisted.internet.error import twisted.web.http -from twisted.internet import defer, threads +from twisted.internet import defer from twisted.web.resource import Resource from synapse.api.errors import ( @@ -36,8 +36,8 @@ from synapse.api.errors import ( ) from synapse.http.matrixfederationclient import MatrixFederationHttpClient from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.util import logcontext from synapse.util.async_helpers import Linearizer -from synapse.util.logcontext import make_deferred_yieldable from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import is_ascii, random_string @@ -492,10 +492,11 @@ class MediaRepository(object): )) thumbnailer = Thumbnailer(input_path) - t_byte_source = yield make_deferred_yieldable(threads.deferToThread( + t_byte_source = yield logcontext.defer_to_thread( + self.hs.get_reactor(), self._generate_thumbnail, thumbnailer, t_width, t_height, t_method, t_type - )) + ) if t_byte_source: try: @@ -534,10 +535,11 @@ class MediaRepository(object): )) thumbnailer = Thumbnailer(input_path) - t_byte_source = yield make_deferred_yieldable(threads.deferToThread( + t_byte_source = yield logcontext.defer_to_thread( + self.hs.get_reactor(), self._generate_thumbnail, thumbnailer, t_width, t_height, t_method, t_type - )) + ) if t_byte_source: try: @@ -620,15 +622,17 @@ class MediaRepository(object): for (t_width, t_height, t_type), t_method in iteritems(thumbnails): # Generate the thumbnail if t_method == "crop": - t_byte_source = yield make_deferred_yieldable(threads.deferToThread( + t_byte_source = yield logcontext.defer_to_thread( + self.hs.get_reactor(), thumbnailer.crop, t_width, t_height, t_type, - )) + ) elif t_method == "scale": - t_byte_source = yield make_deferred_yieldable(threads.deferToThread( + t_byte_source = yield logcontext.defer_to_thread( + self.hs.get_reactor(), thumbnailer.scale, t_width, t_height, t_type, - )) + ) else: logger.error("Unrecognized method: %r", t_method) continue diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index a6189224ee..896078fe76 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -21,9 +21,10 @@ import sys import six -from twisted.internet import defer, threads +from twisted.internet import defer from twisted.protocols.basic import FileSender +from synapse.util import logcontext from synapse.util.file_consumer import BackgroundFileConsumer from synapse.util.logcontext import make_deferred_yieldable @@ -64,9 +65,10 @@ class MediaStorage(object): with self.store_into_file(file_info) as (f, fname, finish_cb): # Write to the main repository - yield make_deferred_yieldable(threads.deferToThread( + yield logcontext.defer_to_thread( + self.hs.get_reactor(), 
_write_file_synchronously, source, f, - )) + ) yield finish_cb() defer.returnValue(fname) diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py index 7b9f8b4d79..5aa03031f6 100644 --- a/synapse/rest/media/v1/storage_provider.py +++ b/synapse/rest/media/v1/storage_provider.py @@ -17,9 +17,10 @@ import logging import os import shutil -from twisted.internet import defer, threads +from twisted.internet import defer from synapse.config._base import Config +from synapse.util import logcontext from synapse.util.logcontext import run_in_background from .media_storage import FileResponder @@ -120,7 +121,8 @@ class FileStorageProviderBackend(StorageProvider): if not os.path.exists(dirname): os.makedirs(dirname) - return threads.deferToThread( + return logcontext.defer_to_thread( + self.hs.get_reactor(), shutil.copyfile, primary_fname, backup_fname, ) diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 89224b26cc..4c6e92beb8 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -25,7 +25,7 @@ See doc/log_contexts.rst for details on how this works. import logging import threading -from twisted.internet import defer +from twisted.internet import defer, threads logger = logging.getLogger(__name__) @@ -562,58 +562,76 @@ def _set_context_cb(result, context): return result -# modules to ignore in `logcontext_tracer` -_to_ignore = [ - "synapse.util.logcontext", - "synapse.http.server", - "synapse.storage._base", - "synapse.util.async_helpers", -] +def defer_to_thread(reactor, f, *args, **kwargs): + """ + Calls the function `f` using a thread from the reactor's default threadpool and + returns the result as a Deferred. + + Creates a new logcontext for `f`, which is created as a child of the current + logcontext (so its CPU usage metrics will get attributed to the current + logcontext). `f` should preserve the logcontext it is given. + + The result deferred follows the Synapse logcontext rules: you should `yield` + on it. + + Args: + reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread + the Deferred will be invoked, and whose threadpool we should use for the + function. + + Normally this will be hs.get_reactor(). + + f (callable): The function to call. + args: positional arguments to pass to f. -def logcontext_tracer(frame, event, arg): - """A tracer that logs whenever a logcontext "unexpectedly" changes within - a function. Probably inaccurate. + kwargs: keyword arguments to pass to f. - Use by calling `sys.settrace(logcontext_tracer)` in the main thread. + Returns: + Deferred: A Deferred which fires a callback with the result of `f`, or an + errback if `f` throws an exception. 
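+
+    Example (an illustrative sketch only; ``_compute`` stands in for any
+    blocking function you want to run off the reactor thread):
+
+        @defer.inlineCallbacks
+        def do_lookup(self):
+            result = yield logcontext.defer_to_thread(
+                self.hs.get_reactor(), _compute, arg,
+            )
+            defer.returnValue(result)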
""" - if event == 'call': - name = frame.f_globals["__name__"] - if name.startswith("synapse"): - if name == "synapse.util.logcontext": - if frame.f_code.co_name in ["__enter__", "__exit__"]: - tracer = frame.f_back.f_trace - if tracer: - tracer.just_changed = True - - tracer = frame.f_trace - if tracer: - return tracer - - if not any(name.startswith(ig) for ig in _to_ignore): - return LineTracer() - - -class LineTracer(object): - __slots__ = ["context", "just_changed"] - - def __init__(self): - self.context = LoggingContext.current_context() - self.just_changed = False - - def __call__(self, frame, event, arg): - if event in 'line': - if self.just_changed: - self.context = LoggingContext.current_context() - self.just_changed = False - else: - c = LoggingContext.current_context() - if c != self.context: - logger.info( - "Context changed! %s -> %s, %s, %s", - self.context, c, - frame.f_code.co_filename, frame.f_lineno - ) - self.context = c + return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs) - return self + +def defer_to_threadpool(reactor, threadpool, f, *args, **kwargs): + """ + A wrapper for twisted.internet.threads.deferToThreadpool, which handles + logcontexts correctly. + + Calls the function `f` using a thread from the given threadpool and returns + the result as a Deferred. + + Creates a new logcontext for `f`, which is created as a child of the current + logcontext (so its CPU usage metrics will get attributed to the current + logcontext). `f` should preserve the logcontext it is given. + + The result deferred follows the Synapse logcontext rules: you should `yield` + on it. + + Args: + reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread + the Deferred will be invoked. Normally this will be hs.get_reactor(). + + threadpool (twisted.python.threadpool.ThreadPool): The threadpool to use for + running `f`. Normally this will be hs.get_reactor().getThreadPool(). + + f (callable): The function to call. + + args: positional arguments to pass to f. + + kwargs: keyword arguments to pass to f. + + Returns: + Deferred: A Deferred which fires a callback with the result of `f`, or an + errback if `f` throws an exception. 
+    """
+    logcontext = LoggingContext.current_context()
+
+    def g():
+        with LoggingContext(parent_context=logcontext):
+            return f(*args, **kwargs)
+
+    return make_deferred_yieldable(
+        threads.deferToThreadPool(reactor, threadpool, g)
+    )
-- cgit 1.4.1


From b3f6dddad2c3777e3620acfbb97e19409a5beb81 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 23 Oct 2018 14:29:17 +0100
Subject: Give some more things logcontexts (#4077)

---
 changelog.d/4077.misc | 1 +
 synapse/handlers/deactivate_account.py | 4 ++--
 synapse/handlers/user_directory.py | 14 +++++++++----
 3 files changed, 12 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/4077.misc

diff --git a/changelog.d/4077.misc b/changelog.d/4077.misc
new file mode 100644
index 0000000000..52ca4c1de2
--- /dev/null
+++ b/changelog.d/4077.misc
@@ -0,0 +1 @@
+Give some more things logcontexts
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index b078df4a76..75fe50c42c 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -17,8 +17,8 @@ import logging
 from twisted.internet import defer

 from synapse.api.errors import SynapseError
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import UserID, create_requester
-from synapse.util.logcontext import run_in_background

 from ._base import BaseHandler

@@ -121,7 +121,7 @@ class DeactivateAccountHandler(BaseHandler):
             None
         """
         if not self._user_parter_running:
-            run_in_background(self._user_parter_loop)
+            run_as_background_process("user_parter_loop", self._user_parter_loop)

     @defer.inlineCallbacks
     def _user_parter_loop(self):
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index d8413d6aa7..f11b430126 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -20,6 +20,7 @@ from six import iteritems
 from twisted.internet import defer

 from synapse.api.constants import EventTypes, JoinRules, Membership
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.roommember import ProfileInfo
 from synapse.types import get_localpart_from_id
 from synapse.util.metrics import Measure
@@ -98,7 +99,6 @@ class UserDirectoryHandler(object):
         """
         return self.store.search_user_dir(user_id, search_term, limit)

-    @defer.inlineCallbacks
     def notify_new_event(self):
         """Called when there may be more deltas to process
         """
@@ -108,11 +108,15 @@ class UserDirectoryHandler(object):
         if self._is_processing:
             return

+        @defer.inlineCallbacks
+        def process():
+            try:
+                yield self._unsafe_process()
+            finally:
+                self._is_processing = False
+
         self._is_processing = True
-        try:
-            yield self._unsafe_process()
-        finally:
-            self._is_processing = False
+        run_as_background_process("user_directory.notify_new_event", process)

     @defer.inlineCallbacks
     def handle_local_profile_change(self, user_id, profile):
-- cgit 1.4.1


From 6105c6101fa0f4560daf7d23dedcfd217530b02a Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Tue, 23 Oct 2018 15:24:58 +0100
Subject: fix race condition in calling initialise_reserved_users

---
 changelog.d/4081.bugfix | 2 ++
 synapse/app/homeserver.py | 8 ------
 synapse/storage/monthly_active_users.py | 46 +++++++++++++++++++++---------
 synapse/storage/registration.py | 16 ++++++++---
 tests/storage/test_monthly_active_users.py | 10 +++++--
 5 files changed, 55 insertions(+), 27 deletions(-)
 create mode 100644 changelog.d/4081.bugfix
diff --git a/changelog.d/4081.bugfix b/changelog.d/4081.bugfix
new file mode 100644
index 0000000000..f275acb61c
--- /dev/null
+++ b/changelog.d/4081.bugfix
@@ -0,0 +1,2 @@
+Fix race condition in populating reserved users
+
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 0b85b377e3..593e1e75db 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -553,14 +553,6 @@ def run(hs):
         generate_monthly_active_users,
     )

-    # XXX is this really supposed to be a background process? it looks
-    # like it needs to complete before some of the other stuff runs.
-    run_as_background_process(
-        "initialise_reserved_users",
-        hs.get_datastore().initialise_reserved_users,
-        hs.config.mau_limits_reserved_threepids,
-    )
-
     start_generate_monthly_active_users()
     if hs.config.limit_usage_by_mau:
         clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000)
diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py
index 0fe8c8e24c..26e577814a 100644
--- a/synapse/storage/monthly_active_users.py
+++ b/synapse/storage/monthly_active_users.py
@@ -33,19 +33,28 @@ class MonthlyActiveUsersStore(SQLBaseStore):
         self._clock = hs.get_clock()
         self.hs = hs
         self.reserved_users = ()
+        self.initialise_reserved_users(
+            dbconn.cursor(), hs.config.mau_limits_reserved_threepids
+        )

-    @defer.inlineCallbacks
-    def initialise_reserved_users(self, threepids):
-        store = self.hs.get_datastore()
+    def initialise_reserved_users(self, txn, threepids):
+        """
+        Ensures that reserved threepids are accounted for in the MAU table, should
+        be called on start up.
+
+        Arguments:
+            threepids []: List of threepid dicts to reserve
+        """
         reserved_user_list = []

         # Do not add more reserved users than the total allowable number
         for tp in threepids[:self.hs.config.max_mau_value]:
-            user_id = yield store.get_user_id_by_threepid(
+            user_id = self.get_user_id_by_threepid_txn(
+                txn,
                 tp["medium"], tp["address"]
             )
             if user_id:
-                yield self.upsert_monthly_active_user(user_id)
+                self.upsert_monthly_active_user_txn(txn, user_id)
                 reserved_user_list.append(user_id)
             else:
                 logger.warning(
@@ -55,8 +64,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):

     @defer.inlineCallbacks
     def reap_monthly_active_users(self):
-        """
-        Cleans out monthly active user table to ensure that no stale
+        """Cleans out monthly active user table to ensure that no stale
         entries exist.

         Returns:
@@ -165,19 +173,33 @@ class MonthlyActiveUsersStore(SQLBaseStore):

     @defer.inlineCallbacks
     def upsert_monthly_active_user(self, user_id):
+        """Updates or inserts monthly active user member
+        Arguments:
+            user_id (str): user to add/update
+        """
+        is_insert = yield self.runInteraction(
+            "upsert_monthly_active_user", self.upsert_monthly_active_user_txn,
+            user_id
+        )
+        if is_insert:
+            self.user_last_seen_monthly_active.invalidate((user_id,))
+            self.get_monthly_active_count.invalidate(())
+
+    def upsert_monthly_active_user_txn(self, txn, user_id):
         """
         Updates or inserts monthly active user member
         Arguments:
+            txn (cursor):
             user_id (str): user to add/update
-            Deferred[bool]: True if a new entry was created, False if an
+            bool: True if a new entry was created, False if an
                 existing one was updated.
         """
        # Am consciously deciding to lock the table on the basis that it ought
        # never be a big table and alternative approaches (batching multiple
# See https://github.com/matrix-org/synapse/issues/3854 for more - is_insert = yield self._simple_upsert( - desc="upsert_monthly_active_user", + is_insert = self._simple_upsert_txn( + txn, table="monthly_active_users", keyvalues={ "user_id": user_id, @@ -186,9 +208,7 @@ class MonthlyActiveUsersStore(SQLBaseStore): "timestamp": int(self._clock.time_msec()), }, ) - if is_insert: - self.user_last_seen_monthly_active.invalidate((user_id,)) - self.get_monthly_active_count.invalidate(()) + return is_insert @cached(num_args=1) def user_last_seen_monthly_active(self, user_id): diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 26b429e307..01931f29cc 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -474,17 +474,25 @@ class RegistrationStore(RegistrationWorkerStore, @defer.inlineCallbacks def get_user_id_by_threepid(self, medium, address): - ret = yield self._simple_select_one( + user_id = yield self.runInteraction( + "get_user_id_by_threepid", self.get_user_id_by_threepid_txn, + medium, address + ) + defer.returnValue(user_id) + + def get_user_id_by_threepid_txn(self, txn, medium, address): + ret = self._simple_select_one_txn( + txn, "user_threepids", { "medium": medium, "address": address }, - ['user_id'], True, 'get_user_id_by_threepid' + ['user_id'], True ) if ret: - defer.returnValue(ret['user_id']) - defer.returnValue(None) + return ret['user_id'] + return None def user_delete_threepid(self, user_id, medium, address): return self._simple_delete( diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index 686f12a0dc..0c17745ae7 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -52,7 +52,10 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): now = int(self.hs.get_clock().time_msec()) self.store.user_add_threepid(user1, "email", user1_email, now, now) self.store.user_add_threepid(user2, "email", user2_email, now, now) - self.store.initialise_reserved_users(threepids) + + self.store.runInteraction( + "initialise", self.store.initialise_reserved_users, threepids + ) self.pump() active_count = self.store.get_monthly_active_count() @@ -199,7 +202,10 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): {'medium': 'email', 'address': user2_email}, ] self.hs.config.mau_limits_reserved_threepids = threepids - self.store.initialise_reserved_users(threepids) + self.store.runInteraction( + "initialise", self.store.initialise_reserved_users, threepids + ) + self.pump() count = self.store.get_registered_reserved_users_count() self.assertEquals(self.get_success(count), 0) -- cgit 1.4.1 From 329d18b39cbc1bc9eba8ae9de1fcf734d0cf1a78 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Tue, 23 Oct 2018 15:27:20 +0100 Subject: remove white space --- changelog.d/4081.bugfix | 1 - synapse/storage/monthly_active_users.py | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/changelog.d/4081.bugfix b/changelog.d/4081.bugfix index f275acb61c..13dad58842 100644 --- a/changelog.d/4081.bugfix +++ b/changelog.d/4081.bugfix @@ -1,2 +1 @@ Fix race condition in populating reserved users - diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index 26e577814a..cf15f8c5ba 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -38,8 +38,7 @@ class MonthlyActiveUsersStore(SQLBaseStore): ) def initialise_reserved_users(self, txn, threepids): - """ - 
Ensures that reserved threepids are accounted for in the MAU table, should + """Ensures that reserved threepids are accounted for in the MAU table, should be called on start up. Arguments: -- cgit 1.4.1 From 3e704822be40a7d1d3fa82bf554f6a85823b5686 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Tue, 23 Oct 2018 10:25:31 -0600 Subject: Comments help --- synapse/handlers/groups_local.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index cb7e7d49cb..173315af6c 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -41,6 +41,11 @@ def _create_rerouter(func_name): destination, group_id, *args, **kwargs ) + # Capture errors returned by the remote homeserver and + # re-throw specific errors as SynapseErrors. This is so + # when the remote end responds with things like 403 Not + # In Group, we can communicate that to the client instead + # of a 500. def h(failure): failure.trap(HttpResponseException) e = failure.value -- cgit 1.4.1 From a67d8ace9bc8b5f5ab953fdcfd6ade077337782d Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Tue, 23 Oct 2018 17:44:39 +0100 Subject: remove errant exception and style --- changelog.d/3975.feature | 2 +- synapse/config/registration.py | 2 +- synapse/handlers/register.py | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/changelog.d/3975.feature b/changelog.d/3975.feature index 79c2711fb4..1e33f1b3b8 100644 --- a/changelog.d/3975.feature +++ b/changelog.d/3975.feature @@ -1 +1 @@ -Servers with auto-join rooms, should automatically create those rooms when first user registers +Servers with auto-join rooms, will now automatically create those rooms when the first user registers diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 4b9bf6f2d1..5df321b287 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -18,7 +18,7 @@ from distutils.util import strtobool from synapse.types import RoomAlias from synapse.util.stringutils import random_string_with_symbols -from ._base import Config, ConfigError +from synapse.config._base import Config, ConfigError class RegistrationConfig(Config): diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 1b5873c8d7..9615dd552f 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -231,15 +231,15 @@ class RegistrationHandler(BaseHandler): for r in self.hs.config.auto_join_rooms: try: if should_auto_create_rooms: - if self.hs.hostname != RoomAlias.from_string(r).domain: - logger.warn( + room_alias = RoomAlias.from_string(r) + if self.hs.hostname != room_alias.domain: + logger.warning( 'Cannot create room alias %s, ' - 'it does not match server domain' % (r,) + 'it does not match server domain', (r,) ) - raise SynapseError() else: # create room expects the localpart of the room alias - room_alias_localpart = RoomAlias.from_string(r).localpart + room_alias_localpart = room_alias.localpart yield self.room_creation_handler.create_room( fake_requester, config={ -- cgit 1.4.1 From e564306e315fc3dfd37e5fed495ae300fbb58c8a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 24 Oct 2018 09:23:33 +0100 Subject: sanity-check the is_processing flag ... 
and rename it, for even more sanity --- synapse/push/emailpusher.py | 11 +++++++---- synapse/push/httppusher.py | 11 +++++++---- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index 0bc548ae7a..f369124258 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -70,7 +70,7 @@ class EmailPusher(object): # See httppusher self.max_stream_ordering = None - self.processing = False + self._is_processing = False def on_started(self): if self.mailer is not None: @@ -99,15 +99,18 @@ class EmailPusher(object): self._start_processing() def _start_processing(self): - if self.processing: + if self._is_processing: return run_as_background_process("emailpush.process", self._process) @defer.inlineCallbacks def _process(self): + # we should never get here if we are already processing + assert not self._is_processing + try: - self.processing = True + self._is_processing = True if self.throttle_params is None: # this is our first loop: load up the throttle params @@ -126,7 +129,7 @@ class EmailPusher(object): if self.max_stream_ordering == starting_max_ordering: break finally: - self.processing = False + self._is_processing = False @defer.inlineCallbacks def _unsafe_process(self): diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 33034d44da..6bd703632d 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -60,7 +60,7 @@ class HttpPusher(object): self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC self.failing_since = pusherdict['failing_since'] self.timed_call = None - self.processing = False + self._is_processing = False # This is the highest stream ordering we know it's safe to process. # When new events arrive, we'll be given a window of new events: we @@ -122,15 +122,18 @@ class HttpPusher(object): self.timed_call = None def _start_processing(self): - if self.processing: + if self._is_processing: return run_as_background_process("httppush.process", self._process) @defer.inlineCallbacks def _process(self): + # we should never get here if we are already processing + assert not self._is_processing + try: - self.processing = True + self._is_processing = True # if the max ordering changes while we're running _unsafe_process, # call it again, and so on until we've caught up. while True: @@ -142,7 +145,7 @@ class HttpPusher(object): if self.max_stream_ordering == starting_max_ordering: break finally: - self.processing = False + self._is_processing = False @defer.inlineCallbacks def _unsafe_process(self): -- cgit 1.4.1 From c573794b22a47a0f03c80f2732922c3375e7b74c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 24 Oct 2018 09:24:55 +0100 Subject: Fix start_pushers vs _start_pushers confusion --- synapse/push/pusherpool.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index d25877cc66..5a4e73ccd6 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -41,7 +41,7 @@ class PusherPool: def __init__(self, _hs): self.hs = _hs self.pusher_factory = PusherFactory(_hs) - self.start_pushers = _hs.config.start_pushers + self._should_start_pushers = _hs.config.start_pushers self.store = self.hs.get_datastore() self.clock = self.hs.get_clock() self.pushers = {} @@ -49,7 +49,7 @@ class PusherPool: def start(self): """Starts the pushers off in a background process. 
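+        This is a no-op if pushers are disabled in this instance's config.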
""" - if not self.start_pushers: + if not self._should_start_pushers: logger.info("Not starting pushers because they are disabled in the config") return run_as_background_process("start_pushers", self._start_pushers) @@ -175,7 +175,7 @@ class PusherPool: @defer.inlineCallbacks def start_pusher_by_id(self, app_id, pushkey, user_id): """Look up the details for the given pusher, and start it""" - if not self._start_pushers: + if not self._should_start_pushers: return resultlist = yield self.store.get_pushers_by_app_id_and_pushkey( -- cgit 1.4.1 From 810715f79aafc586e70e9dcd3da4a2cc6823f704 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 24 Oct 2018 09:44:22 +0100 Subject: Rename resolve_events_with_factory --- synapse/handlers/federation.py | 4 ++-- synapse/state/__init__.py | 14 +++++++------- synapse/state/v1.py | 2 +- synapse/state/v2.py | 2 +- tests/state/test_v2.py | 4 ++-- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index b028d58ae4..91a18a552c 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -53,7 +53,7 @@ from synapse.replication.http.federation import ( ReplicationFederationSendEventsRestServlet, ) from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet -from synapse.state import StateResolutionStore, resolve_events_with_factory +from synapse.state import StateResolutionStore, resolve_events_with_store from synapse.types import UserID, get_domain_from_id from synapse.util import logcontext, unwrapFirstError from synapse.util.async_helpers import Linearizer @@ -385,7 +385,7 @@ class FederationHandler(BaseHandler): event_map[x.event_id] = x room_version = yield self.store.get_room_version(room_id) - state_map = yield resolve_events_with_factory( + state_map = yield resolve_events_with_store( room_version, state_maps, event_map, state_res_store=StateResolutionStore(self.store), ) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 836e137296..9b40b18d5b 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -394,7 +394,7 @@ class StateHandler(object): } with Measure(self.clock, "state._resolve_events"): - new_state = yield resolve_events_with_factory( + new_state = yield resolve_events_with_store( room_version, state_set_ids, event_map=state_map, state_res_store=StateResolutionStore(self.store), @@ -478,10 +478,10 @@ class StateResolutionHandler(object): # start by assuming we won't have any conflicted state, and build up the new # state map by iterating through the state groups. If we discover a conflict, - # we give up and instead use `resolve_events_with_factory`. + # we give up and instead use `resolve_events_with_store`. # # XXX: is this actually worthwhile, or should we just let - # resolve_events_with_factory do it? + # resolve_events_with_store do it? 
new_state = {} conflicted_state = False for st in itervalues(state_groups_ids): @@ -496,7 +496,7 @@ class StateResolutionHandler(object): if conflicted_state: logger.info("Resolving conflicted state for %r", room_id) with Measure(self.clock, "state._resolve_events"): - new_state = yield resolve_events_with_factory( + new_state = yield resolve_events_with_store( room_version, list(itervalues(state_groups_ids)), event_map=event_map, @@ -581,7 +581,7 @@ def _make_state_cache_entry( ) -def resolve_events_with_factory(room_version, state_sets, event_map, state_res_store): +def resolve_events_with_store(room_version, state_sets, event_map, state_res_store): """ Args: room_version(str): Version of the room @@ -604,11 +604,11 @@ def resolve_events_with_factory(room_version, state_sets, event_map, state_res_s a map from (type, state_key) to event_id. """ if room_version == RoomVersions.V1: - return v1.resolve_events_with_factory( + return v1.resolve_events_with_store( state_sets, event_map, state_res_store.get_events, ) elif room_version == RoomVersions.VDH_TEST: - return v2.resolve_events_with_factory( + return v2.resolve_events_with_store( state_sets, event_map, state_res_store, ) else: diff --git a/synapse/state/v1.py b/synapse/state/v1.py index 7a7157b352..70a981f4a2 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -31,7 +31,7 @@ POWER_KEY = (EventTypes.PowerLevels, "") @defer.inlineCallbacks -def resolve_events_with_factory(state_sets, event_map, state_map_factory): +def resolve_events_with_store(state_sets, event_map, state_map_factory): """ Args: state_sets(list): List of dicts of (type, state_key) -> event_id, diff --git a/synapse/state/v2.py b/synapse/state/v2.py index d034b4f38e..5d06f7e928 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -29,7 +29,7 @@ logger = logging.getLogger(__name__) @defer.inlineCallbacks -def resolve_events_with_factory(state_sets, event_map, state_res_store): +def resolve_events_with_store(state_sets, event_map, state_res_store): """Resolves the state using the v2 state resolution algorithm Args: diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py index b96bb6b25c..73f2ef7557 100644 --- a/tests/state/test_v2.py +++ b/tests/state/test_v2.py @@ -24,7 +24,7 @@ from synapse.event_auth import auth_types_for_event from synapse.events import FrozenEvent from synapse.state.v2 import ( lexicographical_topological_sort, - resolve_events_with_factory, + resolve_events_with_store, ) from synapse.types import EventID @@ -541,7 +541,7 @@ class StateTestCase(unittest.TestCase): elif len(prev_events) == 1: state_before = dict(state_at_event[prev_events[0]]) else: - state_d = resolve_events_with_factory( + state_d = resolve_events_with_store( [state_at_event[n] for n in prev_events], event_map=event_map, state_res_store=TestStateResolutionStore(event_map), -- cgit 1.4.1 From dacbeb2e03274d347bd3d3919a00a56661dbb002 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 24 Oct 2018 09:47:49 +0100 Subject: Comment --- synapse/handlers/federation.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 91a18a552c..1daa08df7e 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -390,6 +390,11 @@ class FederationHandler(BaseHandler): state_res_store=StateResolutionStore(self.store), ) + # We need to give _process_received_pdu the actual state events + # rather than event ids, so generate that now. 
+ + # First though we need to fetch all the events that are in + # state_map, so we can build up the state below. evs = yield self.store.get_events( list(state_map.values()), get_prev_content=False, @@ -397,8 +402,6 @@ class FederationHandler(BaseHandler): ) event_map.update(evs) - # we need to give _process_received_pdu the actual state events - # rather than event ids, so generate that now. state = [ event_map[e] for e in six.itervalues(state_map) ] -- cgit 1.4.1 From 47a9ba435d9a4b9e311d9d9a3c02be105942f357 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 19 Oct 2018 10:26:50 +0100 Subject: Use match rather than search --- synapse/config/room_directory.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py index 41ef3217e8..2ca010afdb 100644 --- a/synapse/config/room_directory.py +++ b/synapse/config/room_directory.py @@ -92,10 +92,11 @@ class _AliasRule(object): boolean """ - if not self._user_id_regex.search(user_id): + # Note: The regexes are anchored at both ends + if not self._user_id_regex.match(user_id): return False - if not self._alias_regex.search(alias): + if not self._alias_regex.match(alias): return False return True -- cgit 1.4.1 From b313b9b009e3344ff680bce0fe6cce160117c83f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 24 Oct 2018 10:02:41 +0100 Subject: isort --- tests/state/test_v2.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py index 73f2ef7557..efd85ebe6c 100644 --- a/tests/state/test_v2.py +++ b/tests/state/test_v2.py @@ -22,10 +22,7 @@ import attr from synapse.api.constants import EventTypes, JoinRules, Membership from synapse.event_auth import auth_types_for_event from synapse.events import FrozenEvent -from synapse.state.v2 import ( - lexicographical_topological_sort, - resolve_events_with_store, -) +from synapse.state.v2 import lexicographical_topological_sort, resolve_events_with_store from synapse.types import EventID from tests import unittest -- cgit 1.4.1 From ef771cc4c2d988e5188ba7e75df9adebb0ebafe1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 24 Oct 2018 10:35:01 +0100 Subject: Fix a number of flake8 errors Broadly three things here: * disable W504 which seems a bit whacko * remove a bunch of `as e` expressions from exception handlers that don't use them * use `r""` for strings which include backslashes Also, we don't use pep8 any more, so we can get rid of the duplicate config there. 
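To illustrate the raw-string part of this change, here is a standalone sketch
(not part of the patch itself; the pattern is the one from
synapse/storage/registration.py below, and the user id is made up):

    import re

    # These two calls compile the identical pattern; the raw string just
    # avoids double-escaping the backslash.
    p1 = re.compile("^@(\\d+):")
    p2 = re.compile(r"^@(\d+):")
    assert p1.pattern == p2.pattern
    assert p2.match("@123:example.org").group(1) == "123"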
--- changelog.d/4082.misc | 1 + scripts-dev/tail-synapse.py | 2 +- scripts/synapse_port_db | 3 ++- setup.cfg | 17 ++++++++--------- synapse/config/repository.py | 2 +- synapse/crypto/keyclient.py | 2 +- synapse/federation/federation_server.py | 2 +- synapse/rest/client/v2_alpha/auth.py | 2 +- synapse/rest/media/v1/preview_url_resource.py | 2 +- synapse/storage/registration.py | 2 +- tests/config/test_generate.py | 2 +- tests/events/test_utils.py | 4 ++-- 12 files changed, 21 insertions(+), 20 deletions(-) create mode 100644 changelog.d/4082.misc diff --git a/changelog.d/4082.misc b/changelog.d/4082.misc new file mode 100644 index 0000000000..a81faf5e9b --- /dev/null +++ b/changelog.d/4082.misc @@ -0,0 +1 @@ +Clean up some bits of code which were flagged by the linter diff --git a/scripts-dev/tail-synapse.py b/scripts-dev/tail-synapse.py index 1a36b94038..7c9985d9f0 100644 --- a/scripts-dev/tail-synapse.py +++ b/scripts-dev/tail-synapse.py @@ -48,7 +48,7 @@ def main(): row.name: row.position for row in replicate(server, {"streams": "-1"})["streams"].rows } - except requests.exceptions.ConnectionError as e: + except requests.exceptions.ConnectionError: time.sleep(0.1) while True: diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 2f6e69e552..3c7b606323 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -501,7 +501,8 @@ class Porter(object): try: yield self.postgres_store.runInteraction("alter_table", alter_table) - except Exception as e: + except Exception: + # On Error Resume Next pass yield self.postgres_store.runInteraction( diff --git a/setup.cfg b/setup.cfg index 52feaa9cc7..b6b4aa740d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -14,17 +14,16 @@ ignore = pylint.cfg tox.ini -[pep8] -max-line-length = 90 -# W503 requires that binary operators be at the end, not start, of lines. Erik -# doesn't like it. E203 is contrary to PEP8. E731 is silly. -ignore = W503,E203,E731 - [flake8] -# note that flake8 inherits the "ignore" settings from "pep8" (because it uses -# pep8 to do those checks), but not the "max-line-length" setting max-line-length = 90 -ignore=W503,E203,E731 + +# see https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes +# for error codes. The ones we ignore are: +# W503: line break before binary operator +# W504: line break after binary operator +# E203: whitespace before ':' (which is contrary to pep8?) +# E731: do not assign a lambda expression, use a def +ignore=W503,W504,E203,E731 [isort] line_length = 89 diff --git a/synapse/config/repository.py b/synapse/config/repository.py index fc909c1fac..06c62ab62c 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -178,7 +178,7 @@ class ContentRepositoryConfig(Config): def default_config(self, **kwargs): media_store = self.default_path("media_store") uploads_path = self.default_path("uploads") - return """ + return r""" # Directory where uploaded images and attachments are stored. 
media_store_path: "%(media_store)s"
diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py
index 57d4665e84..080c81f14b 100644
--- a/synapse/crypto/keyclient.py
+++ b/synapse/crypto/keyclient.py
@@ -55,7 +55,7 @@ def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1):
             raise IOError("Cannot get key for %r" % server_name)
         except (ConnectError, DomainError) as e:
             logger.warn("Error getting key for %r: %s", server_name, e)
-        except Exception as e:
+        except Exception:
             logger.exception("Error getting key for %r", server_name)
     raise IOError("Cannot get key for %r" % server_name)
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 4efe95faa4..af0107a46e 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -800,7 +800,7 @@ class FederationHandlerRegistry(object):
             yield handler(origin, content)
         except SynapseError as e:
             logger.info("Failed to handle edu %r: %r", edu_type, e)
-        except Exception as e:
+        except Exception:
             logger.exception("Failed to handle edu %r", edu_type)

     def on_query(self, query_type, args):
diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py
index bd8b5f4afa..693b303881 100644
--- a/synapse/rest/client/v2_alpha/auth.py
+++ b/synapse/rest/client/v2_alpha/auth.py
@@ -99,7 +99,7 @@ class AuthRestServlet(RestServlet):
     cannot be handled in the normal flow (with requests to the same endpoint).
     Current use is for web fallback auth.
     """
-    PATTERNS = client_v2_patterns("/auth/(?P<stagetype>[\w\.]*)/fallback/web")
+    PATTERNS = client_v2_patterns(r"/auth/(?P<stagetype>[\w\.]*)/fallback/web")

     def __init__(self, hs):
         super(AuthRestServlet, self).__init__()
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 8c892ff187..1a7bfd6b56 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -674,7 +674,7 @@ def summarize_paragraphs(text_nodes, min_size=200, max_size=500):
             # This splits the paragraph into words, but keeping the
             # (preceding) whitespace intact so we can easily concat
             # words back together.
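+            # (For example, r"\s*\S+" splits "one two" into "one" and " two",
+            # keeping each word's leading whitespace attached.)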
- for match in re.finditer("\s*\S+", description): + for match in re.finditer(r"\s*\S+", description): word = match.group() # Keep adding words while the total length is less than diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 26b429e307..2dd14aba1c 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -567,7 +567,7 @@ class RegistrationStore(RegistrationWorkerStore, def _find_next_generated_user_id(txn): txn.execute("SELECT name FROM users") - regex = re.compile("^@(\d+):") + regex = re.compile(r"^@(\d+):") found = set() diff --git a/tests/config/test_generate.py b/tests/config/test_generate.py index f88d28a19d..0c23068bcf 100644 --- a/tests/config/test_generate.py +++ b/tests/config/test_generate.py @@ -67,6 +67,6 @@ class ConfigGenerationTestCase(unittest.TestCase): with open(log_config_file) as f: config = f.read() # find the 'filename' line - matches = re.findall("^\s*filename:\s*(.*)$", config, re.M) + matches = re.findall(r"^\s*filename:\s*(.*)$", config, re.M) self.assertEqual(1, len(matches)) self.assertEqual(matches[0], expected) diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index ff217ca8b9..d0cc492deb 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -156,7 +156,7 @@ class SerializeEventTestCase(unittest.TestCase): room_id="!foo:bar", content={"key.with.dots": {}}, ), - ["content.key\.with\.dots"], + [r"content.key\.with\.dots"], ), {"content": {"key.with.dots": {}}}, ) @@ -172,7 +172,7 @@ class SerializeEventTestCase(unittest.TestCase): "nested.dot.key": {"leaf.key": 42, "not_me_either": 1}, }, ), - ["content.nested\.dot\.key.leaf\.key"], + [r"content.nested\.dot\.key.leaf\.key"], ), {"content": {"nested.dot.key": {"leaf.key": 42}}}, ) -- cgit 1.4.1 From 7e07d25ed64e6af403dd508a2b5226e8d18fc1ac Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 24 Oct 2018 10:41:45 +0100 Subject: Allow backslashes in event field filters Fixes a bug introduced in https://github.com/matrix-org/synapse/pull/1783 which meant that single backslashes were not allowed in event field filters. The intention here is to allow single-backslashes, but disallow double-backslashes. --- changelog.d/4083.bugfix | 1 + synapse/api/filtering.py | 5 ++++- tests/api/test_filtering.py | 12 +++++++++++- 3 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 changelog.d/4083.bugfix diff --git a/changelog.d/4083.bugfix b/changelog.d/4083.bugfix new file mode 100644 index 0000000000..b3b08cdfa6 --- /dev/null +++ b/changelog.d/4083.bugfix @@ -0,0 +1 @@ +Fix bug which prevented backslashes being used in event field filters \ No newline at end of file diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index eed8c67e6a..677c0bdd4c 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -172,7 +172,10 @@ USER_FILTER_SCHEMA = { # events a lot easier as we can then use a negative lookbehind # assertion to split '\.' If we allowed \\ then it would # incorrectly split '\\.' See synapse.events.utils.serialize_event - "pattern": "^((?!\\\).)*$" + # + # Note that because this is a regular expression, we have to escape + # each backslash in the pattern. 
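+            # (r"\\\\" here is four characters, which the regex engine reads
+            # as two literal backslashes: a single backslash in a field name
+            # passes, a doubled one is rejected.)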
+            "pattern": r"^((?!\\\\).)*$"
         }
     }
 },
diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py
index 48b2d3d663..2a7044801a 100644
--- a/tests/api/test_filtering.py
+++ b/tests/api/test_filtering.py
@@ -60,7 +60,7 @@ class FilteringTestCase(unittest.TestCase):
         invalid_filters = [
             {"boom": {}},
             {"account_data": "Hello World"},
-            {"event_fields": ["\\foo"]},
+            {"event_fields": [r"\\foo"]},
             {"room": {"timeline": {"limit": 0}, "state": {"not_bars": ["*"]}}},
             {"event_format": "other"},
             {"room": {"not_rooms": ["#foo:pik-test"]}},
@@ -109,6 +109,16 @@ class FilteringTestCase(unittest.TestCase):
                 "event_format": "client",
                 "event_fields": ["type", "content", "sender"],
             },
+
+            # a single backslash should be permitted (though it is debatable whether
+            # it should be permitted before anything other than `.`, and what that
+            # actually means)
+            #
+            # (note that event_fields is implemented in
+            # synapse.events.utils.serialize_event, and so whether this actually works
+            # is tested elsewhere. We just want to check that it is allowed through the
+            # filter validation)
+            {"event_fields": [r"foo\.bar"]},
         ]
         for filter in valid_filters:
             try:
-- cgit 1.4.1


From 9f72c209eed4bec78133ca9821961eb9a3cf0dad Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 24 Oct 2018 14:37:36 +0100
Subject: Update changelog.d/3975.feature

Co-Authored-By: neilisfragile
---
 changelog.d/3975.feature | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/changelog.d/3975.feature b/changelog.d/3975.feature
index 79c2711fb4..162f30a532 100644
--- a/changelog.d/3975.feature
+++ b/changelog.d/3975.feature
@@ -1 +1 @@
-Servers with auto-join rooms, will now automatically create those rooms when the first user registers
+Servers with auto-join rooms will now automatically create those rooms when the first user registers
-- cgit 1.4.1


From 94a49e0636313507fab2e43b99f969385f535ea2 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 24 Oct 2018 14:39:23 +0100
Subject: fix tuple

Co-Authored-By: neilisfragile
---
 synapse/handlers/register.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 9615dd552f..217928add1 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -235,7 +235,8 @@ class RegistrationHandler(BaseHandler):
                 if self.hs.hostname != room_alias.domain:
                     logger.warning(
                         'Cannot create room alias %s, '
-                        'it does not match server domain', (r,)
+                        'it does not match server domain',
+                        r,
                     )
                 else:
                     # create room expects the localpart of the room alias
-- cgit 1.4.1


From 56a05583ae395bdd81c64b3f637a0827d7e99124 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Wed, 24 Oct 2018 14:43:29 +0100
Subject: Disable travis-ci branch builds

(We really don't need to kick off 10 builds for the branch as well as 10 for the PR)
---
 .travis.yml | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/.travis.yml b/.travis.yml
index 7ee1a278db..1be94515f2 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,6 +4,12 @@ language: python
 # tell travis to cache ~/.cache/pip
 cache: pip

+# only build branches we care about (PRs are built separately)
+branches:
+  only:
+    - master
+    - develop
+
 before_script:
   - git remote set-branches --add origin develop
   - git fetch origin develop
-- cgit 1.4.1


From 3e438bfec8d407ab78ae5429d9cfcd90cc475bdd Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Wed, 24 Oct 2018 15:04:55 +0100
Subject: also build on release branches

---
 .travis.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.travis.yml b/.travis.yml
index 1be94515f2..95ef4a63d0 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -9,6 +9,7 @@ branches:
   only:
     - master
     - develop
+    - /release-v.*/

 before_script:
   - git remote set-branches --add origin develop
-- cgit 1.4.1


From 0f4fb537ce3ff8e8a57c4dd37e9b0bab9c7a7fe2 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Wed, 24 Oct 2018 15:09:05 +0100
Subject: fix branch regexp

---
 .travis.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.travis.yml b/.travis.yml
index 95ef4a63d0..197dec2bc9 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -9,7 +9,7 @@ branches:
   only:
     - master
     - develop
-    - /release-v.*/
+    - /^release-v/

 before_script:
   - git remote set-branches --add origin develop
-- cgit 1.4.1


From ab96ee29c96d10638d6ae69a80521aa8c4f19113 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Wed, 24 Oct 2018 13:41:31 +0100
Subject: reduce git clone depth

---
 .travis.yml | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/.travis.yml b/.travis.yml
index 197dec2bc9..5b6fc47cee 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,6 +4,10 @@ language: python
 # tell travis to cache ~/.cache/pip
 cache: pip

+# don't clone the whole repo history, one commit will do
+git:
+  depth: 1
+
 # only build branches we care about (PRs are built separately)
 branches:
-- cgit 1.4.1


From 480d98c91f32da5127b695047b912b889d0b9dc2 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Wed, 24 Oct 2018 14:49:25 +0100
Subject: Disable newsfragment checks on branch builds

---
 .travis.yml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/.travis.yml b/.travis.yml
index 5b6fc47cee..107b3ab70b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -53,7 +53,9 @@ matrix:
     - python: 3.6
       env: TOX_ENV=check_isort

-    - python: 3.6
+    - # we only need to check for the newsfragment if it's a PR build
+      if: type = pull_request
+      python: 3.6
       env: TOX_ENV=check-newsfragment

 install:
-- cgit 1.4.1


From 83d9ca71225239e2f4ba27dc79907e93d8573d1b Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Wed, 24 Oct 2018 13:48:03 +0100
Subject: only fetch develop for check-newsfragments

---
 .travis.yml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 107b3ab70b..56e10dbdc6 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -15,10 +15,6 @@ branches:
     - develop
     - /^release-v/

-before_script:
-  - git remote set-branches --add origin develop
-  - git fetch origin develop
-
 matrix:
   fast_finish: true
   include:
@@ -57,6 +53,10 @@ matrix:
       if: type = pull_request
       python: 3.6
       env: TOX_ENV=check-newsfragment
+      script:
+        - git remote set-branches --add origin develop
+        - git fetch origin develop
+        - tox -e $TOX_ENV

 install:
   - pip install tox
-- cgit 1.4.1


From 9532caf6ef3cdc6dba80b11a920410b50d3490dd Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Wed, 24 Oct 2018 16:08:25 +0100
Subject: remove trailing white space

---
 synapse/handlers/register.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 217928add1..e9d7b25a36 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -235,7 +235,7 @@ class RegistrationHandler(BaseHandler):
                 if self.hs.hostname != room_alias.domain:
                     logger.warning(
                         'Cannot create room alias %s, '
-                        'it does not match server domain', 
+                        'it does not match server domain',
                         r,
                     )
                 else:
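An aside on the hunk above: passing `r` as a separate argument, rather than
%-formatting it into the message, lets stdlib logging defer the formatting
until the record is actually emitted. A minimal standalone sketch, with a
made-up alias value:

    import logging

    logger = logging.getLogger(__name__)
    alias = "#room:other.example"

    # The %s is filled in lazily, and only if this record is actually handled:
    logger.warning(
        'Cannot create room alias %s, it does not match server domain', alias,
    )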
-- cgit 1.4.1 From 9ec218658650558a704e939e56f87d1df1a84423 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 24 Oct 2018 16:09:21 +0100 Subject: isort --- synapse/config/registration.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 5df321b287..7480ed5145 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -15,11 +15,10 @@ from distutils.util import strtobool +from synapse.config._base import Config, ConfigError from synapse.types import RoomAlias from synapse.util.stringutils import random_string_with_symbols -from synapse.config._base import Config, ConfigError - class RegistrationConfig(Config): -- cgit 1.4.1 From 663d9db8e7892b79a927de70ec9add64312827d5 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 24 Oct 2018 17:17:30 +0100 Subject: commit transaction before closing --- synapse/server.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/server.py b/synapse/server.py index 3e9d3d8256..cf6b872cbd 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -207,6 +207,7 @@ class HomeServer(object): logger.info("Setting up.") with self.get_db_conn() as conn: self.datastore = self.DATASTORE_CLASS(conn, self) + conn.commit() logger.info("Finished setting up.") def get_reactor(self): -- cgit 1.4.1 From ea69a84bbb2fc9c1da6db0384a98f577f3bc95a7 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 24 Oct 2018 17:18:08 +0100 Subject: fix style inconsistencies --- changelog.d/4081.bugfix | 3 ++- synapse/storage/monthly_active_users.py | 43 +++++++++++++++++++----------- synapse/storage/registration.py | 19 +++++++++++++ tests/storage/test_monthly_active_users.py | 4 +-- 4 files changed, 51 insertions(+), 18 deletions(-) diff --git a/changelog.d/4081.bugfix b/changelog.d/4081.bugfix index 13dad58842..cfe4b3e9d9 100644 --- a/changelog.d/4081.bugfix +++ b/changelog.d/4081.bugfix @@ -1 +1,2 @@ -Fix race condition in populating reserved users +Fix race condition where config defined reserved users were not being added to +the monthly active user list prior to the homeserver reactor firing up diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index cf15f8c5ba..9a5c3b7eda 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -33,21 +33,23 @@ class MonthlyActiveUsersStore(SQLBaseStore): self._clock = hs.get_clock() self.hs = hs self.reserved_users = () - self.initialise_reserved_users( - dbconn.cursor(), hs.config.mau_limits_reserved_threepids + # Do not add more reserved users than the total allowable number + self._initialise_reserved_users( + dbconn.cursor(), + hs.config.mau_limits_reserved_threepids[:self.hs.config.max_mau_value], ) - def initialise_reserved_users(self, txn, threepids): + def _initialise_reserved_users(self, txn, threepids): """Ensures that reserved threepids are accounted for in the MAU table, should be called on start up. 
-        Arguments:
-            threepids []: List of threepid dicts to reserve
+        Args:
+            txn (cursor):
+            threepids (list[dict]): List of threepid dicts to reserve
         """
         reserved_user_list = []

-        # Do not add more reserved users than the total allowable number
-        for tp in threepids[:self.hs.config.max_mau_value]:
+        for tp in threepids:
             user_id = self.get_user_id_by_threepid_txn(
                 txn,
                 tp["medium"], tp["address"]
@@ -172,26 +174,36 @@ class MonthlyActiveUsersStore(SQLBaseStore):

     @defer.inlineCallbacks
     def upsert_monthly_active_user(self, user_id):
-        """Updates or inserts monthly active user member
-        Arguments:
+        """Updates or inserts the user into the monthly active user table, which
+        is used to track the current MAU usage of the server
+
+        Args:
             user_id (str): user to add/update
         """
         is_insert = yield self.runInteraction(
             "upsert_monthly_active_user", self.upsert_monthly_active_user_txn,
             user_id
         )
+        # Considered pushing cache invalidation down into txn method, but
+        # did not because txn is not a LoggingTransaction. This means I could not
+        # call txn.call_after(). Therefore cache is altered in background thread
+        # and calls from elsewhere to user_last_seen_monthly_active and
+        # get_monthly_active_count fail with ValueError in
+        # synapse/util/caches/descriptors.py#check_thread
         if is_insert:
             self.user_last_seen_monthly_active.invalidate((user_id,))
             self.get_monthly_active_count.invalidate(())

     def upsert_monthly_active_user_txn(self, txn, user_id):
-        """
-        Updates or inserts monthly active user member
-        Arguments:
-            txn (cursor):
-            user_id (str): user to add/update
+        """Updates or inserts monthly active user member
+
+        Args:
+            txn (cursor):
+            user_id (str): user to add/update
+
+        Returns:
             bool: True if a new entry was created, False if an
-                existing one was updated.
+            existing one was updated.
         """
         # Am consciously deciding to lock the table on the basis that it ought
         # never be a big table and alternative approaches (batching multiple
@@ -207,6 +219,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):
                 "timestamp": int(self._clock.time_msec()),
             },
         )
+
         return is_insert

     @cached(num_args=1)
     def user_last_seen_monthly_active(self, user_id):
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index 0f970850e8..80d76bf9d7 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -474,6 +474,15 @@ class RegistrationStore(RegistrationWorkerStore,

     @defer.inlineCallbacks
     def get_user_id_by_threepid(self, medium, address):
+        """Returns user id from threepid
+
+        Args:
+            medium (str): threepid medium e.g. email
+            address (str): threepid address e.g. me@example.com
+
+        Returns:
+            Deferred[str|None]: user id or None if no user id/threepid mapping exists
+        """
         user_id = yield self.runInteraction(
             "get_user_id_by_threepid", self.get_user_id_by_threepid_txn,
             medium, address
         )
         defer.returnValue(user_id)

     def get_user_id_by_threepid_txn(self, txn, medium, address):
+        """Returns user id from threepid
+
+        Args:
+            txn (cursor):
+            medium (str): threepid medium e.g. email
+            address (str): threepid address e.g.
me@example.com + + Returns: + str|None: user id or None if no user id/threepid mapping exists + """ ret = self._simple_select_one_txn( txn, "user_threepids", diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index 0c17745ae7..832e379a83 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -54,7 +54,7 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): self.store.user_add_threepid(user2, "email", user2_email, now, now) self.store.runInteraction( - "initialise", self.store.initialise_reserved_users, threepids + "initialise", self.store._initialise_reserved_users, threepids ) self.pump() @@ -203,7 +203,7 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): ] self.hs.config.mau_limits_reserved_threepids = threepids self.store.runInteraction( - "initialise", self.store.initialise_reserved_users, threepids + "initialise", self.store._initialise_reserved_users, threepids ) self.pump() -- cgit 1.4.1 From 9283987f7e5c7b678ddf01ffcac888917877ae63 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Thu, 18 Oct 2018 13:03:46 -0600 Subject: Fix test Debug tests Try printing the channel fix Import and use six Remove debugging Disable captcha Add some mocks Define the URL Fix the clock? Less rendering? use the other render Complete the dummy auth stage Fix last stage of the test Remove mocks we don't need --- tests/rest/client/v2_alpha/test_register.py | 75 +++++++++++++++++++---------- 1 file changed, 50 insertions(+), 25 deletions(-) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 36eaabbad8..5dbd16fd9b 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -1,4 +1,5 @@ import json +import six from mock import Mock @@ -175,18 +176,35 @@ class RegisterRestServletTestCase(unittest.TestCase): self.assertEquals(channel.result["code"], b"403", channel.result) self.assertEquals(channel.json_body["error"], "Guest access is disabled") + +class TermsTestCase(unittest.HomeserverTestCase): + servlets = [register_servlets] + + def prepare(self, reactor, clock, hs): + self.clock = MemoryReactorClock() + self.hs_clock = Clock(self.clock) + self.url = "/_matrix/client/r0/register" + self.registration_handler = Mock() + self.auth_handler = Mock() + self.device_handler = Mock() + hs.config.enable_registration = True + hs.config.registrations_require_3pid = [] + hs.config.auto_join_rooms = [] + hs.config.enable_registration_captcha = False + def test_POST_terms_auth(self): self.hs.config.block_events_without_consent_error = True self.hs.config.public_baseurl = "https://example.org" self.hs.config.user_consent_version = "1.0" # Do a UI auth request - request, channel = make_request(b"POST", self.url, b"{}") - render(request, self.resource, self.clock) + request, channel = self.make_request(b"POST", self.url, b"{}") + self.render(request) self.assertEquals(channel.result["code"], b"401", channel.result) - self.assertIsInstance(channel.json_body["session"], str) + self.assertTrue(channel.json_body is not None) + self.assertIsInstance(channel.json_body["session"], six.text_type) self.assertIsInstance(channel.json_body["flows"], list) for flow in channel.json_body["flows"]: @@ -210,40 +228,47 @@ class RegisterRestServletTestCase(unittest.TestCase): self.assertIsInstance(channel.json_body["params"], dict) self.assertDictContainsSubset(channel.json_body["params"], expected_params) - # Completing the 
stage should result in the stage being completed - - user_id = "@kermit:muppet" - token = "kermits_access_token" - device_id = "frogfone" + # We have to complete the dummy auth stage before completing the terms stage request_data = json.dumps( { "username": "kermit", "password": "monkey", - "device_id": device_id, "auth": { "session": channel.json_body["session"], - "type": "m.login.terms", + "type": "m.login.dummy", }, } ) + self.registration_handler.check_username = Mock(return_value=True) - self.auth_result = (None, {"username": "kermit", "password": "monkey"}, None) - self.registration_handler.register = Mock(return_value=(user_id, None)) - self.auth_handler.get_access_token_for_user_id = Mock(return_value=token) - self.device_handler.check_device_registered = Mock(return_value=device_id) + request, channel = make_request(b"POST", self.url, request_data) + self.render(request) + # We don't bother checking that the response is correct - we'll leave that to + # other tests. We just want to make sure we're on the right path. + self.assertEquals(channel.result["code"], b"401", channel.result) + + # Finish the UI auth for terms + request_data = json.dumps( + { + "username": "kermit", + "password": "monkey", + "auth": { + "session": channel.json_body["session"], + "type": "m.login.terms", + }, + } + ) request, channel = make_request(b"POST", self.url, request_data) - render(request, self.resource, self.clock) + self.render(request) + + # We're interested in getting a response that looks like a successful registration, + # not so much that the details are exactly what we want. - det_data = { - "user_id": user_id, - "access_token": token, - "home_server": self.hs.hostname, - "device_id": device_id, - } self.assertEquals(channel.result["code"], b"200", channel.result) - self.assertDictContainsSubset(det_data, channel.json_body) - self.auth_handler.get_login_tuple_for_user_id( - user_id, device_id=device_id, initial_device_display_name=None - ) + + self.assertTrue(channel.json_body is not None) + self.assertIsInstance(channel.json_body["user_id"], six.text_type) + self.assertIsInstance(channel.json_body["access_token"], six.text_type) + self.assertIsInstance(channel.json_body["device_id"], six.text_type) -- cgit 1.4.1 From 4acb6fe8a3c09e0b69bca047c5918ce44d5927f1 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 24 Oct 2018 13:24:24 -0600 Subject: Move test to where the other integration tests are --- tests/rest/client/v2_alpha/test_register.py | 97 ------------------------- tests/test_terms_auth.py | 109 ++++++++++++++++++++++++++++ 2 files changed, 109 insertions(+), 97 deletions(-) create mode 100644 tests/test_terms_auth.py diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 5dbd16fd9b..5deae1c4dd 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -175,100 +175,3 @@ class RegisterRestServletTestCase(unittest.TestCase): self.assertEquals(channel.result["code"], b"403", channel.result) self.assertEquals(channel.json_body["error"], "Guest access is disabled") - - -class TermsTestCase(unittest.HomeserverTestCase): - servlets = [register_servlets] - - def prepare(self, reactor, clock, hs): - self.clock = MemoryReactorClock() - self.hs_clock = Clock(self.clock) - self.url = "/_matrix/client/r0/register" - self.registration_handler = Mock() - self.auth_handler = Mock() - self.device_handler = Mock() - hs.config.enable_registration = True - hs.config.registrations_require_3pid = 
[] - hs.config.auto_join_rooms = [] - hs.config.enable_registration_captcha = False - - def test_POST_terms_auth(self): - self.hs.config.block_events_without_consent_error = True - self.hs.config.public_baseurl = "https://example.org" - self.hs.config.user_consent_version = "1.0" - - # Do a UI auth request - request, channel = self.make_request(b"POST", self.url, b"{}") - self.render(request) - - self.assertEquals(channel.result["code"], b"401", channel.result) - - self.assertTrue(channel.json_body is not None) - self.assertIsInstance(channel.json_body["session"], six.text_type) - - self.assertIsInstance(channel.json_body["flows"], list) - for flow in channel.json_body["flows"]: - self.assertIsInstance(flow["stages"], list) - self.assertTrue(len(flow["stages"]) > 0) - self.assertEquals(flow["stages"][-1], "m.login.terms") - - expected_params = { - "m.login.terms": { - "policies": { - "privacy_policy": { - "en": { - "name": "Privacy Policy", - "url": "https://example.org/_matrix/consent", - }, - "version": "1.0" - }, - }, - }, - } - self.assertIsInstance(channel.json_body["params"], dict) - self.assertDictContainsSubset(channel.json_body["params"], expected_params) - - # We have to complete the dummy auth stage before completing the terms stage - request_data = json.dumps( - { - "username": "kermit", - "password": "monkey", - "auth": { - "session": channel.json_body["session"], - "type": "m.login.dummy", - }, - } - ) - - self.registration_handler.check_username = Mock(return_value=True) - - request, channel = make_request(b"POST", self.url, request_data) - self.render(request) - - # We don't bother checking that the response is correct - we'll leave that to - # other tests. We just want to make sure we're on the right path. - self.assertEquals(channel.result["code"], b"401", channel.result) - - # Finish the UI auth for terms - request_data = json.dumps( - { - "username": "kermit", - "password": "monkey", - "auth": { - "session": channel.json_body["session"], - "type": "m.login.terms", - }, - } - ) - request, channel = make_request(b"POST", self.url, request_data) - self.render(request) - - # We're interested in getting a response that looks like a successful registration, - # not so much that the details are exactly what we want. - - self.assertEquals(channel.result["code"], b"200", channel.result) - - self.assertTrue(channel.json_body is not None) - self.assertIsInstance(channel.json_body["user_id"], six.text_type) - self.assertIsInstance(channel.json_body["access_token"], six.text_type) - self.assertIsInstance(channel.json_body["device_id"], six.text_type) diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py new file mode 100644 index 0000000000..0d95ae09b7 --- /dev/null +++ b/tests/test_terms_auth.py @@ -0,0 +1,109 @@ +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
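# The TermsTestCase below drives /register through a two-stage
# user-interactive auth flow. A rough sketch of the exchange it expects
# (request bodies trimmed to the auth-relevant fields):
#
#   POST /register {}                                                -> 401 (session + flows)
#   POST /register {"auth": {"session": s, "type": "m.login.dummy"}} -> 401 (stage done)
#   POST /register {"auth": {"session": s, "type": "m.login.terms"}} -> 200 (registered)
#
# where s is the session ID handed back in the first 401 response.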
+ +class TermsTestCase(unittest.HomeserverTestCase): + servlets = [register_servlets] + + def prepare(self, reactor, clock, hs): + self.clock = MemoryReactorClock() + self.hs_clock = Clock(self.clock) + self.url = "/_matrix/client/r0/register" + self.registration_handler = Mock() + self.auth_handler = Mock() + self.device_handler = Mock() + hs.config.enable_registration = True + hs.config.registrations_require_3pid = [] + hs.config.auto_join_rooms = [] + hs.config.enable_registration_captcha = False + + def test_ui_auth(self): + self.hs.config.block_events_without_consent_error = True + self.hs.config.public_baseurl = "https://example.org" + self.hs.config.user_consent_version = "1.0" + + # Do a UI auth request + request, channel = self.make_request(b"POST", self.url, b"{}") + self.render(request) + + self.assertEquals(channel.result["code"], b"401", channel.result) + + self.assertTrue(channel.json_body is not None) + self.assertIsInstance(channel.json_body["session"], six.text_type) + + self.assertIsInstance(channel.json_body["flows"], list) + for flow in channel.json_body["flows"]: + self.assertIsInstance(flow["stages"], list) + self.assertTrue(len(flow["stages"]) > 0) + self.assertEquals(flow["stages"][-1], "m.login.terms") + + expected_params = { + "m.login.terms": { + "policies": { + "privacy_policy": { + "en": { + "name": "Privacy Policy", + "url": "https://example.org/_matrix/consent", + }, + "version": "1.0" + }, + }, + }, + } + self.assertIsInstance(channel.json_body["params"], dict) + self.assertDictContainsSubset(channel.json_body["params"], expected_params) + + # We have to complete the dummy auth stage before completing the terms stage + request_data = json.dumps( + { + "username": "kermit", + "password": "monkey", + "auth": { + "session": channel.json_body["session"], + "type": "m.login.dummy", + }, + } + ) + + self.registration_handler.check_username = Mock(return_value=True) + + request, channel = make_request(b"POST", self.url, request_data) + self.render(request) + + # We don't bother checking that the response is correct - we'll leave that to + # other tests. We just want to make sure we're on the right path. + self.assertEquals(channel.result["code"], b"401", channel.result) + + # Finish the UI auth for terms + request_data = json.dumps( + { + "username": "kermit", + "password": "monkey", + "auth": { + "session": channel.json_body["session"], + "type": "m.login.terms", + }, + } + ) + request, channel = make_request(b"POST", self.url, request_data) + self.render(request) + + # We're interested in getting a response that looks like a successful registration, + # not so much that the details are exactly what we want. 
+ + self.assertEquals(channel.result["code"], b"200", channel.result) + + self.assertTrue(channel.json_body is not None) + self.assertIsInstance(channel.json_body["user_id"], six.text_type) + self.assertIsInstance(channel.json_body["access_token"], six.text_type) + self.assertIsInstance(channel.json_body["device_id"], six.text_type) -- cgit 1.4.1 From 81880beff497c516946e28eb5d119ee60cad69e5 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 24 Oct 2018 13:32:13 -0600 Subject: It helps to import things --- tests/test_terms_auth.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py index 0d95ae09b7..b1b0f2a8c6 100644 --- a/tests/test_terms_auth.py +++ b/tests/test_terms_auth.py @@ -12,6 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json + +import six +from mock import Mock + +from twisted.test.proto_helpers import MemoryReactorClock + +from synapse.api.errors import InteractiveAuthIncompleteError +from synapse.http.server import JsonResource +from synapse.rest.client.v2_alpha.register import register_servlets +from synapse.util import Clock + +from tests import unittest +from tests.server import make_request, render, setup_test_homeserver + + class TermsTestCase(unittest.HomeserverTestCase): servlets = [register_servlets] -- cgit 1.4.1 From a5468eaadfb80588ac10b6e1dd69f1282e65b544 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 24 Oct 2018 13:54:38 -0600 Subject: pep8 --- tests/rest/client/v2_alpha/test_register.py | 1 - tests/test_terms_auth.py | 8 +++----- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 5deae1c4dd..1c128e81f5 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -1,5 +1,4 @@ import json -import six from mock import Mock diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py index b1b0f2a8c6..06b68f0a72 100644 --- a/tests/test_terms_auth.py +++ b/tests/test_terms_auth.py @@ -19,13 +19,11 @@ from mock import Mock from twisted.test.proto_helpers import MemoryReactorClock -from synapse.api.errors import InteractiveAuthIncompleteError -from synapse.http.server import JsonResource from synapse.rest.client.v2_alpha.register import register_servlets from synapse.util import Clock from tests import unittest -from tests.server import make_request, render, setup_test_homeserver +from tests.server import make_request class TermsTestCase(unittest.HomeserverTestCase): @@ -114,8 +112,8 @@ class TermsTestCase(unittest.HomeserverTestCase): request, channel = make_request(b"POST", self.url, request_data) self.render(request) - # We're interested in getting a response that looks like a successful registration, - # not so much that the details are exactly what we want. + # We're interested in getting a response that looks like a successful + # registration, not so much that the details are exactly what we want. self.assertEquals(channel.result["code"], b"200", channel.result) -- cgit 1.4.1 From 77d3b5772fc53fdc4461ebb3e6bd2c6c8f7e78bf Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 24 Oct 2018 21:59:26 +0100 Subject: disable coverage checking I don't think we ever use this, and it slows things down. If we want to use it, we should just do so on a couple of builds rather than all of them. 
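Should coverage ever be wanted again on just a couple of builds, a sketch of
what that could look like: a dedicated tox environment (the env name below is
hypothetical) reusing the invocation this patch removes, so the ordinary test
envs stay fast.

    [testenv:coverage]
    deps =
        {[base]deps}
        coverage
    commands =
        coverage run --source="{toxinidir}/synapse" \
            "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests}
        coverage report -m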
---
 tox.ini | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/tox.ini b/tox.ini
index 04d2f721bf..9de5a5704a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,7 +3,6 @@ envlist = packaging, py27, py36, pep8, check_isort

 [base]
 deps =
-    coverage
     Twisted>=17.1
     mock
     python-subunit
@@ -26,9 +25,7 @@ passenv = *
 commands =
     /usr/bin/find "{toxinidir}" -name '*.pyc' -delete
-    coverage run {env:COVERAGE_OPTS:} --source="{toxinidir}/synapse" \
-        "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}
-    {env:DUMP_COVERAGE_COMMAND:coverage report -m}
+    "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}

 [testenv:py27]
-- cgit 1.4.1

From fc33e813231a9a78012080193a4a2a96951515da Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Thu, 25 Oct 2018 00:44:55 +0100
Subject: Combine the pep8 and check_isort builds into one

there's really no point spinning up two separate jobs for these.
---
 .travis.yml | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 56e10dbdc6..64079b56f6 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -22,7 +22,7 @@ matrix:
     env: TOX_ENV=packaging

   - python: 3.6
-    env: TOX_ENV=pep8
+    env: TOX_ENV="pep8, check_isort"

   - python: 2.7
     env: TOX_ENV=py27
@@ -46,9 +46,6 @@ matrix:
     services:
       - postgresql

-  - python: 3.6
-    env: TOX_ENV=check_isort
-
   - # we only need to check for the newsfragment if it's a PR build
     if: type = pull_request
     python: 3.6
-- cgit 1.4.1

From 46f98a6a297ff014e7e88fb056e59755fd18cf58 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Thu, 25 Oct 2018 01:00:58 +0100
Subject: Only cache the wheels
---
 .travis.yml | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 64079b56f6..ac34c36725 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,8 +1,16 @@
 sudo: false
 language: python

-# tell travis to cache ~/.cache/pip
-cache: pip
+cache:
+  directories:
+    # we only bother to cache the wheels; parts of the http cache get
+    # invalidated every build (because they get served with a max-age of 600
+    # seconds), which means that we end up re-uploading the whole cache for
+    # every build, which is time-consuming. In any case, it's not obvious that
+    # downloading the cache from S3 would be much faster than downloading the
+    # originals from pypi.
+ # + - $HOME/.cache/pip/wheels # don't clone the whole repo history, one commit will do git: -- cgit 1.4.1 From edd2d828095c5e9352d2755af19c0be5e4dc9e0d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 25 Oct 2018 01:06:39 +0100 Subject: oops, run the check_isort build --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index ac34c36725..fd41841c77 100644 --- a/.travis.yml +++ b/.travis.yml @@ -30,7 +30,7 @@ matrix: env: TOX_ENV=packaging - python: 3.6 - env: TOX_ENV="pep8, check_isort" + env: TOX_ENV="pep8,check_isort" - python: 2.7 env: TOX_ENV=py27 -- cgit 1.4.1 From f8fe98812be74e37432f11508c07488079bf949d Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 25 Oct 2018 14:58:59 +0100 Subject: improve comments --- synapse/storage/monthly_active_users.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index 9a5c3b7eda..01963c879f 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -184,18 +184,18 @@ class MonthlyActiveUsersStore(SQLBaseStore): "upsert_monthly_active_user", self.upsert_monthly_active_user_txn, user_id ) - # Considered pushing cache invalidation down into txn method, but - # did not because txn is not a LoggingTransaction. This means I could not - # call txn.call_after(). Therefore cache is altered in background thread - # and calls from elsewhere to user_last_seen_monthly_active and - # get_monthly_active_count fail with ValueError in - # synapse/util/caches/descriptors.py#check_thread + if is_insert: self.user_last_seen_monthly_active.invalidate((user_id,)) self.get_monthly_active_count.invalidate(()) def upsert_monthly_active_user_txn(self, txn, user_id): """Updates or inserts monthly active user member + Note that, after calling this method, it will generally be necessary + to invalidate the caches on user_last_seen_monthly_active and + get_monthly_active_count. We can't do that here, because we are running + in a database thread rather than the main thread, and we can't call + txn.call_after because txn may not be a LoggingTransaction. Args: txn (cursor): -- cgit 1.4.1 From e5481b22aac800b016e05f50a50ced85a226b364 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 25 Oct 2018 15:25:21 +0100 Subject: Use allow/deny --- synapse/config/room_directory.py | 12 ++++++------ tests/config/test_room_directory.py | 8 ++++---- tests/handlers/test_directory.py | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py index 2ca010afdb..9da13ab11b 100644 --- a/synapse/config/room_directory.py +++ b/synapse/config/room_directory.py @@ -35,13 +35,13 @@ class RoomDirectoryConfig(Config): # The format of this option is a list of rules that contain globs that # match against user_id and the new alias (fully qualified with server # name). The action in the first rule that matches is taken, which can - # currently either be "allowed" or "denied". + # currently either be "allow" or "deny". # # If no rules match the request is denied. 
         alias_creation_rules:
             - user_id: "*"
               alias: "*"
-              action: allowed
+              action: allow
         """

     def is_alias_creation_allowed(self, user_id, alias):
@@ -56,7 +56,7 @@ class RoomDirectoryConfig(Config):
         """
         for rule in self._alias_creation_rules:
             if rule.matches(user_id, alias):
-                return rule.action == "allowed"
+                return rule.action == "allow"

         return False

@@ -67,12 +67,12 @@ class _AliasRule(object):
         user_id = rule["user_id"]
         alias = rule["alias"]

-        if action in ("allowed", "denied"):
+        if action in ("allow", "deny"):
             self.action = action
         else:
             raise ConfigError(
-                "alias_creation_rules rules can only have action of 'allowed'"
-                " or 'denied'"
+                "alias_creation_rules rules can only have action of 'allow'"
+                " or 'deny'"
             )

         try:
diff --git a/tests/config/test_room_directory.py b/tests/config/test_room_directory.py
index 75021a5f04..f37a17d618 100644
--- a/tests/config/test_room_directory.py
+++ b/tests/config/test_room_directory.py
@@ -26,16 +26,16 @@ class RoomDirectoryConfigTestCase(unittest.TestCase):
         alias_creation_rules:
             - user_id: "*bob*"
               alias: "*"
-              action: "denied"
+              action: "deny"
             - user_id: "*"
               alias: "#unofficial_*"
-              action: "allowed"
+              action: "allow"
             - user_id: "@foo*:example.com"
               alias: "*"
-              action: "allowed"
+              action: "allow"
             - user_id: "@gah:example.com"
               alias: "#goo:example.com"
-              action: "allowed"
+              action: "allow"
         """)

         rd_config = RoomDirectoryConfig()
diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py
index 4f299b74ba..8ae6556c0a 100644
--- a/tests/handlers/test_directory.py
+++ b/tests/handlers/test_directory.py
@@ -118,7 +118,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase):
             {
                 "user_id": "*",
                 "alias": "#unofficial_*",
-                "action": "allowed",
+                "action": "allow",
             }
         ]
-- cgit 1.4.1

From fcbd488e9a60a70a81cd7a94a6adebe6e06c525a Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Thu, 25 Oct 2018 16:13:43 +0100
Subject: add new line
---
 synapse/storage/monthly_active_users.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py
index 01963c879f..cf4104dc2e 100644
--- a/synapse/storage/monthly_active_users.py
+++ b/synapse/storage/monthly_active_users.py
@@ -191,6 +191,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):

     def upsert_monthly_active_user_txn(self, txn, user_id):
         """Updates or inserts monthly active user member
+
         Note that, after calling this method, it will generally be necessary
         to invalidate the caches on user_last_seen_monthly_active and
         get_monthly_active_count. We can't do that here, because we are running
-- cgit 1.4.1

From cb53ce9d6429252d5ee012f5a476cc834251c27d Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 25 Oct 2018 17:49:55 +0100
Subject: Refactor state group lookup to reduce DB hits (#4011)

Currently when fetching state groups from the data store we make two
hits to the database: once for members and once for non-members (unless
request is filtered to one or the other). This adds needless load to the
database, so this PR refactors the lookup to make only a single database
hit.
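To make the intent concrete before the diff: the PR replaces the twin
"members" / "non-members" lookups with a single filter object that can express
both at once. The sketch below is illustrative only; StateFilterSketch and the
event IDs are invented for this note, and the real class is the StateFilter
added to synapse/storage/state.py below.

    # A single filter can say "these member events, plus all other state",
    # so one database query can serve what previously took two.
    class StateFilterSketch(object):
        def __init__(self, types, include_others=False):
            # types: dict of event type -> set of state_keys (None = all keys)
            self.types = types
            self.include_others = include_others

        def filter_state(self, state_dict):
            # Keep only the {(type, state_key): event_id} entries we asked for.
            filtered = {}
            for (typ, state_key), event_id in state_dict.items():
                if typ in self.types:
                    keys = self.types[typ]
                    if keys is None or state_key in keys:
                        filtered[(typ, state_key)] = event_id
                elif self.include_others:
                    filtered[(typ, state_key)] = event_id
            return filtered

    f = StateFilterSketch({"m.room.member": {"@alice:hs"}}, include_others=True)
    state = {
        ("m.room.create", ""): "$create",
        ("m.room.member", "@alice:hs"): "$alice",
        ("m.room.member", "@bob:hs"): "$bob",
    }
    # Non-member state is kept, and only the requested member event survives:
    assert f.filter_state(state) == {
        ("m.room.create", ""): "$create",
        ("m.room.member", "@alice:hs"): "$alice",
    }

Because the whole request is one object, the storage layer can turn it into a
single WHERE clause (see make_sql_filter_clause in the diff) rather than
issuing separate member and non-member queries.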
--- changelog.d/4011.misc | 1 + synapse/handlers/initial_sync.py | 4 +- synapse/handlers/message.py | 20 +- synapse/handlers/pagination.py | 15 +- synapse/handlers/room.py | 22 +- synapse/handlers/sync.py | 97 ++--- synapse/rest/client/v1/room.py | 3 +- synapse/storage/events.py | 2 +- synapse/storage/state.py | 845 ++++++++++++++++++++++++--------------- synapse/visibility.py | 15 +- tests/storage/test_state.py | 175 +++++--- 11 files changed, 714 insertions(+), 485 deletions(-) create mode 100644 changelog.d/4011.misc diff --git a/changelog.d/4011.misc b/changelog.d/4011.misc new file mode 100644 index 0000000000..ad7768c4cd --- /dev/null +++ b/changelog.d/4011.misc @@ -0,0 +1 @@ +Reduce database load when fetching state groups diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index e009395207..563bb3cea3 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -156,7 +156,7 @@ class InitialSyncHandler(BaseHandler): room_end_token = "s%d" % (event.stream_ordering,) deferred_room_state = run_in_background( self.store.get_state_for_events, - [event.event_id], None, + [event.event_id], ) deferred_room_state.addCallback( lambda states: states[event.event_id] @@ -301,7 +301,7 @@ class InitialSyncHandler(BaseHandler): def _room_initial_sync_parted(self, user_id, room_id, pagin_config, membership, member_event_id, is_peeking): room_state = yield self.store.get_state_for_events( - [member_event_id], None + [member_event_id], ) room_state = room_state[member_event_id] diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 6c4fcfb10a..969e588e73 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -35,6 +35,7 @@ from synapse.crypto.event_signing import add_hashes_and_signatures from synapse.events.utils import serialize_event from synapse.events.validator import EventValidator from synapse.replication.http.send_event import ReplicationSendEventRestServlet +from synapse.storage.state import StateFilter from synapse.types import RoomAlias, UserID from synapse.util.async_helpers import Linearizer from synapse.util.frozenutils import frozendict_json_encoder @@ -80,7 +81,7 @@ class MessageHandler(object): elif membership == Membership.LEAVE: key = (event_type, state_key) room_state = yield self.store.get_state_for_events( - [membership_event_id], [key] + [membership_event_id], StateFilter.from_types([key]) ) data = room_state[membership_event_id].get(key) @@ -88,7 +89,7 @@ class MessageHandler(object): @defer.inlineCallbacks def get_state_events( - self, user_id, room_id, types=None, filtered_types=None, + self, user_id, room_id, state_filter=StateFilter.all(), at_token=None, is_guest=False, ): """Retrieve all state events for a given room. If the user is @@ -100,13 +101,8 @@ class MessageHandler(object): Args: user_id(str): The user requesting state events. room_id(str): The room ID to get all state events from. - types(list[(str, str|None)]|None): List of (type, state_key) tuples - which are used to filter the state fetched. If `state_key` is None, - all events are returned of the given type. - May be None, which matches any key. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. 
at_token(StreamToken|None): the stream token of the at which we are requesting the stats. If the user is not allowed to view the state as of that stream token, we raise a 403 SynapseError. If None, returns the current @@ -139,7 +135,7 @@ class MessageHandler(object): event = last_events[0] if visible_events: room_state = yield self.store.get_state_for_events( - [event.event_id], types, filtered_types=filtered_types, + [event.event_id], state_filter=state_filter, ) room_state = room_state[event.event_id] else: @@ -158,12 +154,12 @@ class MessageHandler(object): if membership == Membership.JOIN: state_ids = yield self.store.get_filtered_current_state_ids( - room_id, types, filtered_types=filtered_types, + room_id, state_filter=state_filter, ) room_state = yield self.store.get_events(state_ids.values()) elif membership == Membership.LEAVE: room_state = yield self.store.get_state_for_events( - [membership_event_id], types, filtered_types=filtered_types, + [membership_event_id], state_filter=state_filter, ) room_state = room_state[membership_event_id] diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index a155b6e938..43f81bd607 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -21,6 +21,7 @@ from twisted.python.failure import Failure from synapse.api.constants import EventTypes, Membership from synapse.api.errors import SynapseError from synapse.events.utils import serialize_event +from synapse.storage.state import StateFilter from synapse.types import RoomStreamToken from synapse.util.async_helpers import ReadWriteLock from synapse.util.logcontext import run_in_background @@ -255,16 +256,14 @@ class PaginationHandler(object): if event_filter and event_filter.lazy_load_members(): # TODO: remove redundant members - types = [ - (EventTypes.Member, state_key) - for state_key in set( - event.sender # FIXME: we also care about invite targets etc. - for event in events - ) - ] + # FIXME: we also care about invite targets etc. + state_filter = StateFilter.from_types( + (EventTypes.Member, event.sender) + for event in events + ) state_ids = yield self.store.get_state_ids_for_event( - events[0].event_id, types=types, + events[0].event_id, state_filter=state_filter, ) if state_ids: diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index ab1571b27b..3ba92bdb4c 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -33,6 +33,7 @@ from synapse.api.constants import ( RoomCreationPreset, ) from synapse.api.errors import AuthError, Codes, StoreError, SynapseError +from synapse.storage.state import StateFilter from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID from synapse.util import stringutils from synapse.visibility import filter_events_for_client @@ -489,23 +490,24 @@ class RoomContextHandler(object): else: last_event_id = event_id - types = None - filtered_types = None if event_filter and event_filter.lazy_load_members(): - members = set(ev.sender for ev in itertools.chain( - results["events_before"], - (results["event"],), - results["events_after"], - )) - filtered_types = [EventTypes.Member] - types = [(EventTypes.Member, member) for member in members] + state_filter = StateFilter.from_lazy_load_member_list( + ev.sender + for ev in itertools.chain( + results["events_before"], + (results["event"],), + results["events_after"], + ) + ) + else: + state_filter = StateFilter.all() # XXX: why do we return the state as of the last event rather than the # first? 
Shouldn't we be consistent with /sync? # https://github.com/matrix-org/matrix-doc/issues/687 state = yield self.store.get_state_for_events( - [last_event_id], types, filtered_types=filtered_types, + [last_event_id], state_filter=state_filter, ) results["state"] = list(state[last_event_id].values()) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 351892a94f..09739f2862 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -27,6 +27,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.push.clientformat import format_push_rules_for_user from synapse.storage.roommember import MemberSummary +from synapse.storage.state import StateFilter from synapse.types import RoomStreamToken from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache @@ -469,25 +470,20 @@ class SyncHandler(object): )) @defer.inlineCallbacks - def get_state_after_event(self, event, types=None, filtered_types=None): + def get_state_after_event(self, event, state_filter=StateFilter.all()): """ Get the room state after the given event Args: event(synapse.events.EventBase): event of interest - types(list[(str, str|None)]|None): List of (type, state_key) tuples - which are used to filter the state fetched. If `state_key` is None, - all events are returned of the given type. - May be None, which matches any key. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. Returns: A Deferred map from ((type, state_key)->Event) """ state_ids = yield self.store.get_state_ids_for_event( - event.event_id, types, filtered_types=filtered_types, + event.event_id, state_filter=state_filter, ) if event.is_state(): state_ids = state_ids.copy() @@ -495,18 +491,14 @@ class SyncHandler(object): defer.returnValue(state_ids) @defer.inlineCallbacks - def get_state_at(self, room_id, stream_position, types=None, filtered_types=None): + def get_state_at(self, room_id, stream_position, state_filter=StateFilter.all()): """ Get the room state at a particular stream position Args: room_id(str): room for which to get state stream_position(StreamToken): point at which to get state - types(list[(str, str|None)]|None): List of (type, state_key) tuples - which are used to filter the state fetched. If `state_key` is None, - all events are returned of the given type. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. Returns: A Deferred map from ((type, state_key)->Event) @@ -522,7 +514,7 @@ class SyncHandler(object): if last_events: last_event = last_events[-1] state = yield self.get_state_after_event( - last_event, types, filtered_types=filtered_types, + last_event, state_filter=state_filter, ) else: @@ -563,10 +555,11 @@ class SyncHandler(object): last_event = last_events[-1] state_ids = yield self.store.get_state_ids_for_event( - last_event.event_id, [ + last_event.event_id, + state_filter=StateFilter.from_types([ (EventTypes.Name, ''), (EventTypes.CanonicalAlias, ''), - ] + ]), ) # this is heavily cached, thus: fast. 
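# A hedged aside on the helper used just above (its definition appears in
# synapse/storage/state.py later in this patch): StateFilter.from_types()
# collapses an iterable of (type, state_key) pairs into the dict-based
# filter, so
#
#     StateFilter.from_types([(EventTypes.Name, ''), (EventTypes.CanonicalAlias, '')])
#
# matches exactly the room name and canonical alias events, while a
# state_key of None would widen the match to every event of that type.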
@@ -717,8 +710,7 @@ class SyncHandler(object): with Measure(self.clock, "compute_state_delta"): - types = None - filtered_types = None + members_to_fetch = None lazy_load_members = sync_config.filter_collection.lazy_load_members() include_redundant_members = ( @@ -729,16 +721,21 @@ class SyncHandler(object): # We only request state for the members needed to display the # timeline: - types = [ - (EventTypes.Member, state_key) - for state_key in set( - event.sender # FIXME: we also care about invite targets etc. - for event in batch.events - ) - ] + members_to_fetch = set( + event.sender # FIXME: we also care about invite targets etc. + for event in batch.events + ) + + if full_state: + # always make sure we LL ourselves so we know we're in the room + # (if we are) to fix https://github.com/vector-im/riot-web/issues/7209 + # We only need apply this on full state syncs given we disabled + # LL for incr syncs in #3840. + members_to_fetch.add(sync_config.user.to_string()) - # only apply the filtering to room members - filtered_types = [EventTypes.Member] + state_filter = StateFilter.from_lazy_load_member_list(members_to_fetch) + else: + state_filter = StateFilter.all() timeline_state = { (event.type, event.state_key): event.event_id @@ -746,28 +743,19 @@ class SyncHandler(object): } if full_state: - if lazy_load_members: - # always make sure we LL ourselves so we know we're in the room - # (if we are) to fix https://github.com/vector-im/riot-web/issues/7209 - # We only need apply this on full state syncs given we disabled - # LL for incr syncs in #3840. - types.append((EventTypes.Member, sync_config.user.to_string())) - if batch: current_state_ids = yield self.store.get_state_ids_for_event( - batch.events[-1].event_id, types=types, - filtered_types=filtered_types, + batch.events[-1].event_id, state_filter=state_filter, ) state_ids = yield self.store.get_state_ids_for_event( - batch.events[0].event_id, types=types, - filtered_types=filtered_types, + batch.events[0].event_id, state_filter=state_filter, ) else: current_state_ids = yield self.get_state_at( - room_id, stream_position=now_token, types=types, - filtered_types=filtered_types, + room_id, stream_position=now_token, + state_filter=state_filter, ) state_ids = current_state_ids @@ -781,8 +769,7 @@ class SyncHandler(object): ) elif batch.limited: state_at_timeline_start = yield self.store.get_state_ids_for_event( - batch.events[0].event_id, types=types, - filtered_types=filtered_types, + batch.events[0].event_id, state_filter=state_filter, ) # for now, we disable LL for gappy syncs - see @@ -797,17 +784,15 @@ class SyncHandler(object): # members to just be ones which were timeline senders, which then ensures # all of the rest get included in the state block (if we need to know # about them). 
- types = None - filtered_types = None + state_filter = StateFilter.all() state_at_previous_sync = yield self.get_state_at( - room_id, stream_position=since_token, types=types, - filtered_types=filtered_types, + room_id, stream_position=since_token, + state_filter=state_filter, ) current_state_ids = yield self.store.get_state_ids_for_event( - batch.events[-1].event_id, types=types, - filtered_types=filtered_types, + batch.events[-1].event_id, state_filter=state_filter, ) state_ids = _calculate_state( @@ -821,7 +806,7 @@ class SyncHandler(object): else: state_ids = {} if lazy_load_members: - if types and batch.events: + if members_to_fetch and batch.events: # We're returning an incremental sync, with no # "gap" since the previous sync, so normally there would be # no state to return. @@ -831,8 +816,12 @@ class SyncHandler(object): # timeline here, and then dedupe any redundant ones below. state_ids = yield self.store.get_state_ids_for_event( - batch.events[0].event_id, types=types, - filtered_types=None, # we only want members! + batch.events[0].event_id, + # we only want members! + state_filter=StateFilter.from_types( + (EventTypes.Member, member) + for member in members_to_fetch + ), ) if lazy_load_members and not include_redundant_members: diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 663934efd0..fcfe7857f6 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -33,6 +33,7 @@ from synapse.http.servlet import ( parse_json_object_from_request, parse_string, ) +from synapse.storage.state import StateFilter from synapse.streams.config import PaginationConfig from synapse.types import RoomAlias, RoomID, StreamToken, ThirdPartyInstanceID, UserID @@ -409,7 +410,7 @@ class RoomMemberListRestServlet(ClientV1RestServlet): room_id=room_id, user_id=requester.user.to_string(), at_token=at_token, - types=[(EventTypes.Member, None)], + state_filter=StateFilter.from_types([(EventTypes.Member, None)]), ) chunk = [] diff --git a/synapse/storage/events.py b/synapse/storage/events.py index c780f55277..8881b009df 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2089,7 +2089,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore for sg in remaining_state_groups: logger.info("[purge] de-delta-ing remaining state group %s", sg) curr_state = self._get_state_groups_from_groups_txn( - txn, [sg], types=None + txn, [sg], ) curr_state = curr_state[sg] diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 3f4cbd61c4..ef65929bb2 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -19,6 +19,8 @@ from collections import namedtuple from six import iteritems, itervalues from six.moves import range +import attr + from twisted.internet import defer from synapse.api.constants import EventTypes @@ -48,6 +50,318 @@ class _GetStateGroupDelta(namedtuple("_GetStateGroupDelta", ("prev_group", "delt return len(self.delta_ids) if self.delta_ids else 0 +@attr.s(slots=True) +class StateFilter(object): + """A filter used when querying for state. + + Attributes: + types (dict[str, set[str]|None]): Map from type to set of state keys (or + None). This specifies which state_keys for the given type to fetch + from the DB. If None then all events with that type are fetched. If + the set is empty then no events with that type are fetched. + include_others (bool): Whether to fetch events with types that do not + appear in `types`. 
+ """ + + types = attr.ib() + include_others = attr.ib(default=False) + + def __attrs_post_init__(self): + # If `include_others` is set we canonicalise the filter by removing + # wildcards from the types dictionary + if self.include_others: + self.types = { + k: v for k, v in iteritems(self.types) + if v is not None + } + + @staticmethod + def all(): + """Creates a filter that fetches everything. + + Returns: + StateFilter + """ + return StateFilter(types={}, include_others=True) + + @staticmethod + def none(): + """Creates a filter that fetches nothing. + + Returns: + StateFilter + """ + return StateFilter(types={}, include_others=False) + + @staticmethod + def from_types(types): + """Creates a filter that only fetches the given types + + Args: + types (Iterable[tuple[str, str|None]]): A list of type and state + keys to fetch. A state_key of None fetches everything for + that type + + Returns: + StateFilter + """ + type_dict = {} + for typ, s in types: + if typ in type_dict: + if type_dict[typ] is None: + continue + + if s is None: + type_dict[typ] = None + continue + + type_dict.setdefault(typ, set()).add(s) + + return StateFilter(types=type_dict) + + @staticmethod + def from_lazy_load_member_list(members): + """Creates a filter that returns all non-member events, plus the member + events for the given users + + Args: + members (iterable[str]): Set of user IDs + + Returns: + StateFilter + """ + return StateFilter( + types={EventTypes.Member: set(members)}, + include_others=True, + ) + + def return_expanded(self): + """Creates a new StateFilter where type wild cards have been removed + (except for memberships). The returned filter is a superset of the + current one, i.e. anything that passes the current filter will pass + the returned filter. + + This helps the caching as the DictionaryCache knows if it has *all* the + state, but does not know if it has all of the keys of a particular type, + which makes wildcard lookups expensive unless we have a complete cache. + Hence, if we are doing a wildcard lookup, populate the cache fully so + that we can do an efficient lookup next time. + + Note that since we have two caches, one for membership events and one for + other events, we can be a bit more clever than simply returning + `StateFilter.all()` if `has_wildcards()` is True. + + We return a StateFilter where: + 1. the list of membership events to return is the same + 2. if there is a wildcard that matches non-member events we + return all non-member events + + Returns: + StateFilter + """ + + if self.is_full(): + # If we're going to return everything then there's nothing to do + return self + + if not self.has_wildcards(): + # If there are no wild cards, there's nothing to do + return self + + if EventTypes.Member in self.types: + get_all_members = self.types[EventTypes.Member] is None + else: + get_all_members = self.include_others + + has_non_member_wildcard = self.include_others or any( + state_keys is None + for t, state_keys in iteritems(self.types) + if t != EventTypes.Member + ) + + if not has_non_member_wildcard: + # If there are no non-member wild cards we can just return ourselves + return self + + if get_all_members: + # We want to return everything. + return StateFilter.all() + else: + # We want to return all non-members, but only particular + # memberships + return StateFilter( + types={EventTypes.Member: self.types[EventTypes.Member]}, + include_others=True, + ) + + def make_sql_filter_clause(self): + """Converts the filter to an SQL clause. 
+
+        For example:
+
+            f = StateFilter.from_types([("m.room.create", "")])
+            clause, args = f.make_sql_filter_clause()
+            clause == "(type = ? AND state_key = ?)"
+            args == ['m.room.create', '']
+
+
+        Returns:
+            tuple[str, list]: The SQL string (may be empty) and arguments. An
+            empty SQL string is returned when the filter matches everything
+            (i.e. is "full").
+        """
+
+        where_clause = ""
+        where_args = []
+
+        if self.is_full():
+            return where_clause, where_args
+
+        if not self.include_others and not self.types:
+            # i.e. this is an empty filter, so we need to return a clause that
+            # will match nothing
+            return "1 = 2", []
+
+        # First we build up a list of clauses for each type/state_key combo
+        clauses = []
+        for etype, state_keys in iteritems(self.types):
+            if state_keys is None:
+                clauses.append("(type = ?)")
+                where_args.append(etype)
+                continue
+
+            for state_key in state_keys:
+                clauses.append("(type = ? AND state_key = ?)")
+                where_args.extend((etype, state_key))
+
+        # This will match anything that appears in `self.types`
+        where_clause = " OR ".join(clauses)
+
+        # If we want to include stuff that's not in the types dict then we add
+        # a `OR type NOT IN (...)` clause to the end.
+        if self.include_others:
+            if where_clause:
+                where_clause += " OR "
+
+            where_clause += "type NOT IN (%s)" % (
+                ",".join(["?"] * len(self.types)),
+            )
+            where_args.extend(self.types)
+
+        return where_clause, where_args
+
+    def max_entries_returned(self):
+        """Returns the maximum number of entries this filter will return if
+        known, otherwise returns None.
+
+        For example, a simple state filter asking for `("m.room.create", "")`
+        will return 1, whereas the default state filter will return None.
+
+        This is used to bail out early if the right number of entries have been
+        fetched.
+        """
+        if self.has_wildcards():
+            return None
+
+        return len(self.concrete_types())
+
+    def filter_state(self, state_dict):
+        """Returns the state filtered by this StateFilter
+
+        Args:
+            state_dict (dict[tuple[str, str], Any]): The state map to filter
+
+        Returns:
+            dict[tuple[str, str], Any]: The filtered state map
+        """
+        if self.is_full():
+            return dict(state_dict)
+
+        filtered_state = {}
+        for k, v in iteritems(state_dict):
+            typ, state_key = k
+            if typ in self.types:
+                state_keys = self.types[typ]
+                if state_keys is None or state_key in state_keys:
+                    filtered_state[k] = v
+            elif self.include_others:
+                filtered_state[k] = v
+
+        return filtered_state
+
+    def is_full(self):
+        """Whether this filter fetches everything or not
+
+        Returns:
+            bool
+        """
+        return self.include_others and not self.types
+
+    def has_wildcards(self):
+        """Whether the filter includes wildcards or is attempting to fetch
+        specific state.
+
+        Returns:
+            bool
+        """
+
+        return (
+            self.include_others
+            or any(
+                state_keys is None
+                for state_keys in itervalues(self.types)
+            )
+        )
+
+    def concrete_types(self):
+        """Returns a list of concrete type/state_keys (i.e. not None) that
+        will be fetched. This will be a complete list if `has_wildcards`
+        returns False, but otherwise will be a subset (or even empty).
+
+        Returns:
+            list[tuple[str,str]]
+        """
+        return [
+            (t, s)
+            for t, state_keys in iteritems(self.types)
+            if state_keys is not None
+            for s in state_keys
+        ]
+
+    def get_member_split(self):
+        """Return the filter split into two: one which assumes it's exclusively
+        matching against member state, and one which assumes it's matching
+        against non-member state.
+ + This is useful due to the returned filters giving correct results for + `is_full()`, `has_wildcards()`, etc, when operating against maps that + either exclusively contain member events or only contain non-member + events. (Which is the case when dealing with the member vs non-member + state caches). + + Returns: + tuple[StateFilter, StateFilter]: The member and non member filters + """ + + if EventTypes.Member in self.types: + state_keys = self.types[EventTypes.Member] + if state_keys is None: + member_filter = StateFilter.all() + else: + member_filter = StateFilter({EventTypes.Member: state_keys}) + elif self.include_others: + member_filter = StateFilter.all() + else: + member_filter = StateFilter.none() + + non_member_filter = StateFilter( + types={k: v for k, v in iteritems(self.types) if k != EventTypes.Member}, + include_others=self.include_others, + ) + + return member_filter, non_member_filter + + # this inherits from EventsWorkerStore because it calls self.get_events class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): """The parts of StateGroupStore that can be called from workers. @@ -152,61 +466,41 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): ) # FIXME: how should this be cached? - def get_filtered_current_state_ids(self, room_id, types, filtered_types=None): + def get_filtered_current_state_ids(self, room_id, state_filter=StateFilter.all()): """Get the current state event of a given type for a room based on the current_state_events table. This may not be as up-to-date as the result of doing a fresh state resolution as per state_handler.get_current_state + Args: room_id (str) - types (list[(Str, (Str|None))]): List of (type, state_key) tuples - which are used to filter the state fetched. `state_key` may be - None, which matches any `state_key` - filtered_types (list[Str]|None): List of types to apply the above filter to. + state_filter (StateFilter): The state filter used to fetch state + from the database. + Returns: - deferred: dict of (type, state_key) -> event + Deferred[dict[tuple[str, str], str]]: Map from type/state_key to + event ID. """ - include_other_types = False if filtered_types is None else True - def _get_filtered_current_state_ids_txn(txn): results = {} - sql = """SELECT type, state_key, event_id FROM current_state_events - WHERE room_id = ? %s""" - # Turns out that postgres doesn't like doing a list of OR's and - # is about 1000x slower, so we just issue a query for each specific - # type seperately. - if types: - clause_to_args = [ - ( - "AND type = ? AND state_key = ?", - (etype, state_key) - ) if state_key is not None else ( - "AND type = ?", - (etype,) - ) - for etype, state_key in types - ] - - if include_other_types: - unique_types = set(filtered_types) - clause_to_args.append( - ( - "AND type <> ? " * len(unique_types), - list(unique_types) - ) - ) - else: - # If types is None we fetch all the state, and so just use an - # empty where clause with no extra args. - clause_to_args = [("", [])] - for where_clause, where_args in clause_to_args: - args = [room_id] - args.extend(where_args) - txn.execute(sql % (where_clause,), args) - for row in txn: - typ, state_key, event_id = row - key = (intern_string(typ), intern_string(state_key)) - results[key] = event_id + sql = """ + SELECT type, state_key, event_id FROM current_state_events + WHERE room_id = ? 
+ """ + + where_clause, where_args = state_filter.make_sql_filter_clause() + + if where_clause: + sql += " AND (%s)" % (where_clause,) + + args = [room_id] + args.extend(where_args) + txn.execute(sql, args) + for row in txn: + typ, state_key, event_id = row + key = (intern_string(typ), intern_string(state_key)) + results[key] = event_id + return results return self.runInteraction( @@ -322,20 +616,14 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): }) @defer.inlineCallbacks - def _get_state_groups_from_groups(self, groups, types, members=None): + def _get_state_groups_from_groups(self, groups, state_filter): """Returns the state groups for a given set of groups, filtering on types of state events. Args: groups(list[int]): list of state group IDs to query - types (Iterable[str, str|None]|None): list of 2-tuples of the form - (`type`, `state_key`), where a `state_key` of `None` matches all - state_keys for the `type`. If None, all types are returned. - members (bool|None): If not None, then, in addition to any filtering - implied by types, the results are also filtered to only include - member events (if True), or to exclude member events (if False) - - Returns: + state_filter (StateFilter): The state filter used to fetch state + from the database. Returns: Deferred[dict[int, dict[tuple[str, str], str]]]: dict of state_group_id -> (dict of (type, state_key) -> event id) @@ -346,19 +634,23 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): for chunk in chunks: res = yield self.runInteraction( "_get_state_groups_from_groups", - self._get_state_groups_from_groups_txn, chunk, types, members, + self._get_state_groups_from_groups_txn, chunk, state_filter, ) results.update(res) defer.returnValue(results) def _get_state_groups_from_groups_txn( - self, txn, groups, types=None, members=None, + self, txn, groups, state_filter=StateFilter.all(), ): results = {group: {} for group in groups} - if types is not None: - types = list(set(types)) # deduplicate types list + where_clause, where_args = state_filter.make_sql_filter_clause() + + # Unless the filter clause is empty, we're going to append it after an + # existing where clause + if where_clause: + where_clause = " AND (%s)" % (where_clause,) if isinstance(self.database_engine, PostgresEngine): # Temporarily disable sequential scans in this transaction. This is @@ -374,79 +666,33 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): # group for the given type, state_key. # This may return multiple rows per (type, state_key), but last_value # should be the same. - sql = (""" + sql = """ WITH RECURSIVE state(state_group) AS ( VALUES(?::bigint) UNION ALL SELECT prev_state_group FROM state_group_edges e, state s WHERE s.state_group = e.state_group ) - SELECT type, state_key, last_value(event_id) OVER ( + SELECT DISTINCT type, state_key, last_value(event_id) OVER ( PARTITION BY type, state_key ORDER BY state_group ASC ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING ) AS event_id FROM state_groups_state WHERE state_group IN ( SELECT state_group FROM state ) - %s - """) + """ - if members is True: - sql += " AND type = '%s'" % (EventTypes.Member,) - elif members is False: - sql += " AND type <> '%s'" % (EventTypes.Member,) - - # Turns out that postgres doesn't like doing a list of OR's and - # is about 1000x slower, so we just issue a query for each specific - # type seperately. - if types is not None: - clause_to_args = [ - ( - "AND type = ? 
AND state_key = ?", - (etype, state_key) - ) if state_key is not None else ( - "AND type = ?", - (etype,) - ) - for etype, state_key in types - ] - else: - # If types is None we fetch all the state, and so just use an - # empty where clause with no extra args. - clause_to_args = [("", [])] - - for where_clause, where_args in clause_to_args: - for group in groups: - args = [group] - args.extend(where_args) + for group in groups: + args = [group] + args.extend(where_args) - txn.execute(sql % (where_clause,), args) - for row in txn: - typ, state_key, event_id = row - key = (typ, state_key) - results[group][key] = event_id + txn.execute(sql + where_clause, args) + for row in txn: + typ, state_key, event_id = row + key = (typ, state_key) + results[group][key] = event_id else: - where_args = [] - where_clauses = [] - wildcard_types = False - if types is not None: - for typ in types: - if typ[1] is None: - where_clauses.append("(type = ?)") - where_args.append(typ[0]) - wildcard_types = True - else: - where_clauses.append("(type = ? AND state_key = ?)") - where_args.extend([typ[0], typ[1]]) - - where_clause = "AND (%s)" % (" OR ".join(where_clauses)) - else: - where_clause = "" - - if members is True: - where_clause += " AND type = '%s'" % EventTypes.Member - elif members is False: - where_clause += " AND type <> '%s'" % EventTypes.Member + max_entries_returned = state_filter.max_entries_returned() # We don't use WITH RECURSIVE on sqlite3 as there are distributions # that ship with an sqlite3 version that doesn't support it (e.g. wheezy) @@ -460,12 +706,11 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): # without the right indices (which we can't add until # after we finish deduping state, which requires this func) args = [next_group] - if types: - args.extend(where_args) + args.extend(where_args) txn.execute( "SELECT type, state_key, event_id FROM state_groups_state" - " WHERE state_group = ? %s" % (where_clause,), + " WHERE state_group = ? " + where_clause, args ) results[group].update( @@ -481,9 +726,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): # wildcards (i.e. Nones) in which case we have to do an exhaustive # search if ( - types is not None and - not wildcard_types and - len(results[group]) == len(types) + max_entries_returned is not None and + len(results[group]) == max_entries_returned ): break @@ -498,20 +742,14 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): return results @defer.inlineCallbacks - def get_state_for_events(self, event_ids, types, filtered_types=None): + def get_state_for_events(self, event_ids, state_filter=StateFilter.all()): """Given a list of event_ids and type tuples, return a list of state - dicts for each event. The state dicts will only have the type/state_keys - that are in the `types` list. + dicts for each event. Args: event_ids (list[string]) - types (list[(str, str|None)]|None): List of (type, state_key) tuples - which are used to filter the state fetched. If `state_key` is None, - all events are returned of the given type. - May be None, which matches any key. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. 
Returns: deferred: A dict of (event_id) -> (type, state_key) -> [state_events] @@ -521,7 +759,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): ) groups = set(itervalues(event_to_groups)) - group_to_state = yield self._get_state_for_groups(groups, types, filtered_types) + group_to_state = yield self._get_state_for_groups(groups, state_filter) state_event_map = yield self.get_events( [ev_id for sd in itervalues(group_to_state) for ev_id in itervalues(sd)], @@ -540,20 +778,15 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): defer.returnValue({event: event_to_state[event] for event in event_ids}) @defer.inlineCallbacks - def get_state_ids_for_events(self, event_ids, types=None, filtered_types=None): + def get_state_ids_for_events(self, event_ids, state_filter=StateFilter.all()): """ Get the state dicts corresponding to a list of events, containing the event_ids of the state events (as opposed to the events themselves) Args: event_ids(list(str)): events whose state should be returned - types(list[(str, str|None)]|None): List of (type, state_key) tuples - which are used to filter the state fetched. If `state_key` is None, - all events are returned of the given type. - May be None, which matches any key. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. Returns: A deferred dict from event_id -> (type, state_key) -> event_id @@ -563,7 +796,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): ) groups = set(itervalues(event_to_groups)) - group_to_state = yield self._get_state_for_groups(groups, types, filtered_types) + group_to_state = yield self._get_state_for_groups(groups, state_filter) event_to_state = { event_id: group_to_state[group] @@ -573,45 +806,35 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): defer.returnValue({event: event_to_state[event] for event in event_ids}) @defer.inlineCallbacks - def get_state_for_event(self, event_id, types=None, filtered_types=None): + def get_state_for_event(self, event_id, state_filter=StateFilter.all()): """ Get the state dict corresponding to a particular event Args: event_id(str): event whose state should be returned - types(list[(str, str|None)]|None): List of (type, state_key) tuples - which are used to filter the state fetched. If `state_key` is None, - all events are returned of the given type. - May be None, which matches any key. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. 
Returns: A deferred dict from (type, state_key) -> state_event """ - state_map = yield self.get_state_for_events([event_id], types, filtered_types) + state_map = yield self.get_state_for_events([event_id], state_filter) defer.returnValue(state_map[event_id]) @defer.inlineCallbacks - def get_state_ids_for_event(self, event_id, types=None, filtered_types=None): + def get_state_ids_for_event(self, event_id, state_filter=StateFilter.all()): """ Get the state dict corresponding to a particular event Args: event_id(str): event whose state should be returned - types(list[(str, str|None)]|None): List of (type, state_key) tuples - which are used to filter the state fetched. If `state_key` is None, - all events are returned of the given type. - May be None, which matches any key. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. Returns: A deferred dict from (type, state_key) -> state_event """ - state_map = yield self.get_state_ids_for_events([event_id], types, filtered_types) + state_map = yield self.get_state_ids_for_events([event_id], state_filter) defer.returnValue(state_map[event_id]) @cached(max_entries=50000) @@ -642,18 +865,14 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): defer.returnValue({row["event_id"]: row["state_group"] for row in rows}) - def _get_some_state_from_cache(self, cache, group, types, filtered_types=None): + def _get_state_for_group_using_cache(self, cache, group, state_filter): """Checks if group is in cache. See `_get_state_for_groups` Args: cache(DictionaryCache): the state group cache to use group(int): The state group to lookup - types(list[str, str|None]): List of 2-tuples of the form - (`type`, `state_key`), where a `state_key` of `None` matches all - state_keys for the `type`. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. Returns 2-tuple (`state_dict`, `got_all`). `got_all` is a bool indicating if we successfully retrieved all @@ -662,124 +881,102 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): """ is_all, known_absent, state_dict_ids = cache.get(group) - type_to_key = {} + if is_all or state_filter.is_full(): + # Either we have everything or want everything, either way + # `is_all` tells us whether we've gotten everything. + return state_filter.filter_state(state_dict_ids), is_all # tracks whether any of our requested types are missing from the cache missing_types = False - for typ, state_key in types: - key = (typ, state_key) - - if ( - state_key is None or - (filtered_types is not None and typ not in filtered_types) - ): - type_to_key[typ] = None - # we mark the type as missing from the cache because - # when the cache was populated it might have been done with a - # restricted set of state_keys, so the wildcard will not work - # and the cache may be incomplete. 
- missing_types = True - else: - if type_to_key.get(typ, object()) is not None: - type_to_key.setdefault(typ, set()).add(state_key) - + if state_filter.has_wildcards(): + # We don't know if we fetched all the state keys for the types in + # the filter that are wildcards, so we have to assume that we may + # have missed some. + missing_types = True + else: + # There aren't any wild cards, so `concrete_types()` returns the + # complete list of event types we're wanting. + for key in state_filter.concrete_types(): if key not in state_dict_ids and key not in known_absent: missing_types = True + break - sentinel = object() - - def include(typ, state_key): - valid_state_keys = type_to_key.get(typ, sentinel) - if valid_state_keys is sentinel: - return filtered_types is not None and typ not in filtered_types - if valid_state_keys is None: - return True - if state_key in valid_state_keys: - return True - return False - - got_all = is_all - if not got_all: - # the cache is incomplete. We may still have got all the results we need, if - # we don't have any wildcards in the match list. - if not missing_types and filtered_types is None: - got_all = True - - return { - k: v for k, v in iteritems(state_dict_ids) - if include(k[0], k[1]) - }, got_all - - def _get_all_state_from_cache(self, cache, group): - """Checks if group is in cache. See `_get_state_for_groups` - - Returns 2-tuple (`state_dict`, `got_all`). `got_all` is a bool - indicating if we successfully retrieved all requests state from the - cache, if False we need to query the DB for the missing state. - - Args: - cache(DictionaryCache): the state group cache to use - group: The state group to lookup - """ - is_all, _, state_dict_ids = cache.get(group) - - return state_dict_ids, is_all + return state_filter.filter_state(state_dict_ids), not missing_types @defer.inlineCallbacks - def _get_state_for_groups(self, groups, types=None, filtered_types=None): + def _get_state_for_groups(self, groups, state_filter=StateFilter.all()): """Gets the state at each of a list of state groups, optionally filtering by type/state_key Args: groups (iterable[int]): list of state groups for which we want to get the state. - types (None|iterable[(str, None|str)]): - indicates the state type/keys required. If None, the whole - state is fetched and returned. - - Otherwise, each entry should be a `(type, state_key)` tuple to - include in the response. A `state_key` of None is a wildcard - meaning that we require all state with that type. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. - + state_filter (StateFilter): The state filter used to fetch state + from the database. 
Returns:
            Deferred[dict[int, dict[tuple[str, str], str]]]:
                dict of state_group_id -> (dict of (type, state_key) -> event id)
        """
-        if types is not None:
-            non_member_types = [t for t in types if t[0] != EventTypes.Member]
-
-            if filtered_types is not None and EventTypes.Member not in filtered_types:
-                # we want all of the membership events
-                member_types = None
-            else:
-                member_types = [t for t in types if t[0] == EventTypes.Member]
-
-        else:
-            non_member_types = None
-            member_types = None
+        member_filter, non_member_filter = state_filter.get_member_split()

-        non_member_state = yield self._get_state_for_groups_using_cache(
-            groups, self._state_group_cache, non_member_types, filtered_types,
+        # Now we look them up in the member and non-member caches
+        non_member_state, incomplete_groups_nm = (
+            yield self._get_state_for_groups_using_cache(
+                groups, self._state_group_cache,
+                state_filter=non_member_filter,
+            )
        )
-        # XXX: we could skip this entirely if member_types is []
-        member_state = yield self._get_state_for_groups_using_cache(
-            # we set filtered_types=None as member_state only ever contain members.
-            groups, self._state_group_members_cache, member_types, None,
+
+        member_state, incomplete_groups_m = (
+            yield self._get_state_for_groups_using_cache(
+                groups, self._state_group_members_cache,
+                state_filter=member_filter,
+            )
        )

-        state = non_member_state
+        state = dict(non_member_state)
        for group in groups:
            state[group].update(member_state[group])

+        # Now fetch any missing groups from the database
+
+        incomplete_groups = incomplete_groups_m | incomplete_groups_nm
+
+        if not incomplete_groups:
+            defer.returnValue(state)
+
+        cache_sequence_nm = self._state_group_cache.sequence
+        cache_sequence_m = self._state_group_members_cache.sequence
+
+        # Help the cache hit ratio by expanding the filter a bit
+        db_state_filter = state_filter.return_expanded()
+
+        group_to_state_dict = yield self._get_state_groups_from_groups(
+            list(incomplete_groups),
+            state_filter=db_state_filter,
+        )
+
+        # Now let's update the caches
+        self._insert_into_cache(
+            group_to_state_dict,
+            db_state_filter,
+            cache_seq_num_members=cache_sequence_m,
+            cache_seq_num_non_members=cache_sequence_nm,
+        )
+
+        # And finally update the result dict, by filtering out any extra
+        # stuff we pulled out of the database.
+        for group, group_state_dict in iteritems(group_to_state_dict):
+            # We just replace any existing entries, as we will have loaded
+            # everything we need from the database anyway.
+            state[group] = state_filter.filter_state(group_state_dict)
+
        defer.returnValue(state)

-    @defer.inlineCallbacks
    def _get_state_for_groups_using_cache(
-        self, groups, cache, types=None, filtered_types=None
+        self, groups, cache, state_filter,
    ):
        """Gets the state at each of a list of state groups, optionally
        filtering by type/state_key, querying from a specific cache.
@@ -790,89 +987,85 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
            cache (DictionaryCache): the cache of group ids to state dicts which
                we will pass through - either the normal state cache or the specific
                members state cache.
-            types (None|iterable[(str, None|str)]):
-                indicates the state type/keys required. If None, the whole
-                state is fetched and returned.
-
-                Otherwise, each entry should be a `(type, state_key)` tuple to
-                include in the response. A `state_key` of None is a wildcard
-                meaning that we require all state with that type.
-            filtered_types(list[str]|None): Only apply filtering via `types` to this
-                list of event types.
Other types of events are returned unfiltered.
-                If None, `types` filtering is applied to all events.
+            state_filter (StateFilter): The state filter used to fetch state
+                from the database.

        Returns:
-            Deferred[dict[int, dict[tuple[str, str], str]]]:
-                dict of state_group_id -> (dict of (type, state_key) -> event id)
+            tuple[dict[int, dict[tuple[str, str], str]], set[int]]: Tuple of
+                dict of state_group_id -> (dict of (type, state_key) -> event id)
+                of entries in the cache, and the state group ids either missing
+                from the cache or incomplete.
        """
-        if types:
-            types = frozenset(types)
        results = {}
-        missing_groups = []
-        if types is not None:
-            for group in set(groups):
-                state_dict_ids, got_all = self._get_some_state_from_cache(
-                    cache, group, types, filtered_types
-                )
-                results[group] = state_dict_ids
+        incomplete_groups = set()
+        for group in set(groups):
+            state_dict_ids, got_all = self._get_state_for_group_using_cache(
+                cache, group, state_filter
+            )
+            results[group] = state_dict_ids

-                if not got_all:
-                    missing_groups.append(group)
-        else:
-            for group in set(groups):
-                state_dict_ids, got_all = self._get_all_state_from_cache(
-                    cache, group
-                )
+            if not got_all:
+                incomplete_groups.add(group)

-                results[group] = state_dict_ids
+        return results, incomplete_groups

-                if not got_all:
-                    missing_groups.append(group)
+    def _insert_into_cache(self, group_to_state_dict, state_filter,
+                           cache_seq_num_members, cache_seq_num_non_members):
+        """Inserts results from querying the database into the relevant cache.

-        if missing_groups:
-            # Okay, so we have some missing_types, let's fetch them.
-            cache_seq_num = cache.sequence
+        Args:
+            group_to_state_dict (dict): The new entries pulled from database.
+                Map from state group to state dict
+            state_filter (StateFilter): The state filter used to fetch state
+                from the database.
+            cache_seq_num_members (int): Sequence number of member cache since
+                last lookup in cache
+            cache_seq_num_non_members (int): Sequence number of non-member cache
+                since last lookup in cache
+        """

-            # the DictionaryCache knows if it has *all* the state, but
-            # does not know if it has all of the keys of a particular type,
-            # which makes wildcard lookups expensive unless we have a complete
-            # cache. Hence, if we are doing a wildcard lookup, populate the
-            # cache fully so that we can do an efficient lookup next time.
+        # We need to work out which types we've fetched from the DB for the
+        # member vs non-member caches. This should be as accurate as possible,
+        # but can be an underestimate (e.g. when we have wild cards)

-            if filtered_types or (types and any(k is None for (t, k) in types)):
-                types_to_fetch = None
-            else:
-                types_to_fetch = types
+        member_filter, non_member_filter = state_filter.get_member_split()
+        if member_filter.is_full():
+            # We fetched all member events
+            member_types = None
+        else:
+            # `concrete_types()` will only return a subset when there are wild
+            # cards in the filter, but that's fine.
+ member_types = member_filter.concrete_types() - group_to_state_dict = yield self._get_state_groups_from_groups( - missing_groups, types_to_fetch, cache == self._state_group_members_cache, - ) + if non_member_filter.is_full(): + # We fetched all non member events + non_member_types = None + else: + non_member_types = non_member_filter.concrete_types() + + for group, group_state_dict in iteritems(group_to_state_dict): + state_dict_members = {} + state_dict_non_members = {} - for group, group_state_dict in iteritems(group_to_state_dict): - state_dict = results[group] - - # update the result, filtering by `types`. - if types: - for k, v in iteritems(group_state_dict): - (typ, _) = k - if ( - (k in types or (typ, None) in types) or - (filtered_types and typ not in filtered_types) - ): - state_dict[k] = v + for k, v in iteritems(group_state_dict): + if k[0] == EventTypes.Member: + state_dict_members[k] = v else: - state_dict.update(group_state_dict) - - # update the cache with all the things we fetched from the - # database. - cache.update( - cache_seq_num, - key=group, - value=group_state_dict, - fetched_keys=types_to_fetch, - ) + state_dict_non_members[k] = v - defer.returnValue(results) + self._state_group_members_cache.update( + cache_seq_num_members, + key=group, + value=state_dict_members, + fetched_keys=member_types, + ) + + self._state_group_cache.update( + cache_seq_num_non_members, + key=group, + value=state_dict_non_members, + fetched_keys=non_member_types, + ) def store_state_group(self, event_id, room_id, prev_group, delta_ids, current_state_ids): @@ -1181,12 +1374,12 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): continue prev_state = self._get_state_groups_from_groups_txn( - txn, [prev_group], types=None + txn, [prev_group], ) prev_state = prev_state[prev_group] curr_state = self._get_state_groups_from_groups_txn( - txn, [state_group], types=None + txn, [state_group], ) curr_state = curr_state[state_group] diff --git a/synapse/visibility.py b/synapse/visibility.py index 43f48196be..0281a7c919 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -23,6 +23,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.events.utils import prune_event +from synapse.storage.state import StateFilter from synapse.types import get_domain_from_id logger = logging.getLogger(__name__) @@ -72,7 +73,7 @@ def filter_events_for_client(store, user_id, events, is_peeking=False, ) event_id_to_state = yield store.get_state_for_events( frozenset(e.event_id for e in events), - types=types, + state_filter=StateFilter.from_types(types), ) ignore_dict_content = yield store.get_global_account_data_by_type_for_user( @@ -273,8 +274,8 @@ def filter_events_for_server(store, server_name, events): # need to check membership (as we know the server is in the room). event_to_state_ids = yield store.get_state_ids_for_events( frozenset(e.event_id for e in events), - types=( - (EventTypes.RoomHistoryVisibility, ""), + state_filter=StateFilter.from_types( + types=((EventTypes.RoomHistoryVisibility, ""),), ) ) @@ -314,9 +315,11 @@ def filter_events_for_server(store, server_name, events): # of the history vis and membership state at those events. 
event_to_state_ids = yield store.get_state_ids_for_events( frozenset(e.event_id for e in events), - types=( - (EventTypes.RoomHistoryVisibility, ""), - (EventTypes.Member, None), + state_filter=StateFilter.from_types( + types=( + (EventTypes.RoomHistoryVisibility, ""), + (EventTypes.Member, None), + ), ) ) diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index b9c5b39d59..086a39d834 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -18,6 +18,7 @@ import logging from twisted.internet import defer from synapse.api.constants import EventTypes, Membership +from synapse.storage.state import StateFilter from synapse.types import RoomID, UserID import tests.unittest @@ -148,7 +149,7 @@ class StateStoreTestCase(tests.unittest.TestCase): # check we get the full state as of the final event state = yield self.store.get_state_for_event( - e5.event_id, None, filtered_types=None + e5.event_id, ) self.assertIsNotNone(e4) @@ -166,33 +167,35 @@ class StateStoreTestCase(tests.unittest.TestCase): # check we can filter to the m.room.name event (with a '' state key) state = yield self.store.get_state_for_event( - e5.event_id, [(EventTypes.Name, '')], filtered_types=None + e5.event_id, StateFilter.from_types([(EventTypes.Name, '')]) ) self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state) # check we can filter to the m.room.name event (with a wildcard None state key) state = yield self.store.get_state_for_event( - e5.event_id, [(EventTypes.Name, None)], filtered_types=None + e5.event_id, StateFilter.from_types([(EventTypes.Name, None)]) ) self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state) # check we can grab the m.room.member events (with a wildcard None state key) state = yield self.store.get_state_for_event( - e5.event_id, [(EventTypes.Member, None)], filtered_types=None + e5.event_id, StateFilter.from_types([(EventTypes.Member, None)]) ) self.assertStateMapEqual( {(e3.type, e3.state_key): e3, (e5.type, e5.state_key): e5}, state ) - # check we can use filtered_types to grab a specific room member - # without filtering out the other event types + # check we can grab a specific room member without filtering out the + # other event types state = yield self.store.get_state_for_event( e5.event_id, - [(EventTypes.Member, self.u_alice.to_string())], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: {self.u_alice.to_string()}}, + include_others=True, + ) ) self.assertStateMapEqual( @@ -204,10 +207,12 @@ class StateStoreTestCase(tests.unittest.TestCase): state, ) - # check that types=[], filtered_types=[EventTypes.Member] - # doesn't return all members + # check that we can grab everything except members state = yield self.store.get_state_for_event( - e5.event_id, [], filtered_types=[EventTypes.Member] + e5.event_id, state_filter=StateFilter( + types={EventTypes.Member: set()}, + include_others=True, + ), ) self.assertStateMapEqual( @@ -215,16 +220,21 @@ class StateStoreTestCase(tests.unittest.TestCase): ) ####################################################### - # _get_some_state_from_cache tests against a full cache + # _get_state_for_group_using_cache tests against a full cache ####################################################### room_id = self.room.to_string() group_ids = yield self.store.get_state_groups_ids(room_id, [e5.event_id]) group = list(group_ids.keys())[0] - # test _get_some_state_from_cache correctly filters out members with types=[] - (state_dict, is_all) = yield 
self.store._get_some_state_from_cache( - self.store._state_group_cache, group, [], filtered_types=[EventTypes.Member] + # test _get_state_for_group_using_cache correctly filters out members + # with types=[] + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( + self.store._state_group_cache, group, + state_filter=StateFilter( + types={EventTypes.Member: set()}, + include_others=True, + ), ) self.assertEqual(is_all, True) @@ -236,22 +246,27 @@ class StateStoreTestCase(tests.unittest.TestCase): state_dict, ) - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_members_cache, group, - [], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: set()}, + include_others=True, + ), ) self.assertEqual(is_all, True) self.assertDictEqual({}, state_dict) - # test _get_some_state_from_cache correctly filters in members with wildcard types - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + # test _get_state_for_group_using_cache correctly filters in members + # with wildcard types + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_cache, group, - [(EventTypes.Member, None)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: None}, + include_others=True, + ), ) self.assertEqual(is_all, True) @@ -263,11 +278,13 @@ class StateStoreTestCase(tests.unittest.TestCase): state_dict, ) - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_members_cache, group, - [(EventTypes.Member, None)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: None}, + include_others=True, + ), ) self.assertEqual(is_all, True) @@ -280,12 +297,15 @@ class StateStoreTestCase(tests.unittest.TestCase): state_dict, ) - # test _get_some_state_from_cache correctly filters in members with specific types - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + # test _get_state_for_group_using_cache correctly filters in members + # with specific types + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_cache, group, - [(EventTypes.Member, e5.state_key)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: {e5.state_key}}, + include_others=True, + ), ) self.assertEqual(is_all, True) @@ -297,23 +317,27 @@ class StateStoreTestCase(tests.unittest.TestCase): state_dict, ) - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_members_cache, group, - [(EventTypes.Member, e5.state_key)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: {e5.state_key}}, + include_others=True, + ), ) self.assertEqual(is_all, True) self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict) - # test _get_some_state_from_cache correctly filters in members with specific types - # and no filtered_types - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + # test _get_state_for_group_using_cache correctly filters in members + # with specific types + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( 
self.store._state_group_members_cache, group, - [(EventTypes.Member, e5.state_key)], - filtered_types=None, + state_filter=StateFilter( + types={EventTypes.Member: {e5.state_key}}, + include_others=False, + ), ) self.assertEqual(is_all, True) @@ -357,42 +381,54 @@ class StateStoreTestCase(tests.unittest.TestCase): ############################################ # test that things work with a partial cache - # test _get_some_state_from_cache correctly filters out members with types=[] + # test _get_state_for_group_using_cache correctly filters out members + # with types=[] room_id = self.room.to_string() - (state_dict, is_all) = yield self.store._get_some_state_from_cache( - self.store._state_group_cache, group, [], filtered_types=[EventTypes.Member] + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( + self.store._state_group_cache, group, + state_filter=StateFilter( + types={EventTypes.Member: set()}, + include_others=True, + ), ) self.assertEqual(is_all, False) self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) room_id = self.room.to_string() - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_members_cache, group, - [], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: set()}, + include_others=True, + ), ) self.assertEqual(is_all, True) self.assertDictEqual({}, state_dict) - # test _get_some_state_from_cache correctly filters in members wildcard types - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + # test _get_state_for_group_using_cache correctly filters in members + # wildcard types + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_cache, group, - [(EventTypes.Member, None)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: None}, + include_others=True, + ), ) self.assertEqual(is_all, False) self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_members_cache, group, - [(EventTypes.Member, None)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: None}, + include_others=True, + ), ) self.assertEqual(is_all, True) @@ -404,44 +440,53 @@ class StateStoreTestCase(tests.unittest.TestCase): state_dict, ) - # test _get_some_state_from_cache correctly filters in members with specific types - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + # test _get_state_for_group_using_cache correctly filters in members + # with specific types + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_cache, group, - [(EventTypes.Member, e5.state_key)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: {e5.state_key}}, + include_others=True, + ), ) self.assertEqual(is_all, False) self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_members_cache, group, - [(EventTypes.Member, e5.state_key)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + 
types={EventTypes.Member: {e5.state_key}}, + include_others=True, + ), ) self.assertEqual(is_all, True) self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict) - # test _get_some_state_from_cache correctly filters in members with specific types - # and no filtered_types - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + # test _get_state_for_group_using_cache correctly filters in members + # with specific types + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_cache, group, - [(EventTypes.Member, e5.state_key)], - filtered_types=None, + state_filter=StateFilter( + types={EventTypes.Member: {e5.state_key}}, + include_others=False, + ), ) self.assertEqual(is_all, False) self.assertDictEqual({}, state_dict) - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_members_cache, group, - [(EventTypes.Member, e5.state_key)], - filtered_types=None, + state_filter=StateFilter( + types={EventTypes.Member: {e5.state_key}}, + include_others=False, + ), ) self.assertEqual(is_all, True) -- cgit 1.4.1 From 871c4abfecfd14acda13e3f25c7d040f848a9a32 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 25 Oct 2018 17:40:41 +0100 Subject: Factor _generate_room_id out of create_room we're going to need this for room upgrades. --- synapse/handlers/room.py | 45 +++++++++++++++++++++++---------------------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 3ba92bdb4c..000a22b07c 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -165,28 +165,7 @@ class RoomCreationHandler(BaseHandler): visibility = config.get("visibility", None) is_public = visibility == "public" - # autogen room IDs and try to create it. We may clash, so just - # try a few times till one goes through, giving up eventually. - attempts = 0 - room_id = None - while attempts < 5: - try: - random_string = stringutils.random_string(18) - gen_room_id = RoomID( - random_string, - self.hs.hostname, - ) - yield self.store.store_room( - room_id=gen_room_id.to_string(), - room_creator_user_id=user_id, - is_public=is_public - ) - room_id = gen_room_id.to_string() - break - except StoreError: - attempts += 1 - if not room_id: - raise StoreError(500, "Couldn't generate a room ID.") + room_id = yield self._generate_room_id(creator_id=user_id, is_public=is_public) if room_alias: directory_handler = self.hs.get_handlers().directory_handler @@ -427,6 +406,28 @@ class RoomCreationHandler(BaseHandler): content=content, ) + @defer.inlineCallbacks + def _generate_room_id(self, creator_id, is_public): + # autogen room IDs and try to create it. We may clash, so just + # try a few times till one goes through, giving up eventually. 
+ attempts = 0 + while attempts < 5: + try: + random_string = stringutils.random_string(18) + gen_room_id = RoomID( + random_string, + self.hs.hostname, + ).to_string() + yield self.store.store_room( + room_id=gen_room_id, + room_creator_user_id=creator_id, + is_public=is_public, + ) + defer.returnValue(gen_room_id) + except StoreError: + attempts += 1 + raise StoreError(500, "Couldn't generate a room ID.") + class RoomContextHandler(object): def __init__(self, hs): -- cgit 1.4.1 From 379376e5e6ae242d9a69f0ec738758a649058a82 Mon Sep 17 00:00:00 2001 From: Cédric Laudrel Date: Thu, 25 Oct 2018 16:36:07 +0200 Subject: Make Docker image listening on ipv6 as well as ipv4 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Cédric Laudrel --- changelog.d/4089.feature | 1 + docker/conf/homeserver.yaml | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/4089.feature diff --git a/changelog.d/4089.feature b/changelog.d/4089.feature new file mode 100644 index 0000000000..62c9d839bb --- /dev/null +++ b/changelog.d/4089.feature @@ -0,0 +1 @@ + Configure Docker image to listen on both ipv4 and ipv6. diff --git a/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml index a38b929f50..1b0f655d26 100644 --- a/docker/conf/homeserver.yaml +++ b/docker/conf/homeserver.yaml @@ -21,7 +21,7 @@ listeners: {% if not SYNAPSE_NO_TLS %} - port: 8448 - bind_addresses: ['0.0.0.0'] + bind_addresses: ['::'] type: http tls: true x_forwarded: false @@ -34,7 +34,7 @@ listeners: - port: 8008 tls: false - bind_addresses: ['0.0.0.0'] + bind_addresses: ['::'] type: http x_forwarded: false -- cgit 1.4.1 From 7f7b2cd3de192816bcb0225774a22617989aec37 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 25 Oct 2018 17:42:37 +0100 Subject: Make room_member_handler a member of RoomCreationHandler ... 
to save passing it into `_send_events_for_new_room` --- synapse/handlers/register.py | 6 ++++-- synapse/handlers/room.py | 9 +++------ 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index e9d7b25a36..7b4549223f 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -50,7 +50,6 @@ class RegistrationHandler(BaseHandler): self._auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() - self.room_creation_handler = self.hs.get_room_creation_handler() self.captcha_client = CaptchaServerHttpClient(hs) self._next_generated_user_id = None @@ -241,7 +240,10 @@ class RegistrationHandler(BaseHandler): else: # create room expects the localpart of the room alias room_alias_localpart = room_alias.localpart - yield self.room_creation_handler.create_room( + + # getting the RoomCreationHandler during init gives a dependency + # loop + yield self.hs.get_room_creation_handler().create_room( fake_requester, config={ "preset": "public_chat", diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 000a22b07c..d03d2cd7be 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -73,6 +73,7 @@ class RoomCreationHandler(BaseHandler): self.spam_checker = hs.get_spam_checker() self.event_creation_handler = hs.get_event_creation_handler() + self.room_member_handler = hs.get_room_member_handler() @defer.inlineCallbacks def create_room(self, requester, config, ratelimit=True, @@ -195,12 +196,9 @@ class RoomCreationHandler(BaseHandler): # override any attempt to set room versions via the creation_content creation_content["room_version"] = room_version - room_member_handler = self.hs.get_room_member_handler() - yield self._send_events_for_new_room( requester, room_id, - room_member_handler, preset_config=preset_config, invite_list=invite_list, initial_state=initial_state, @@ -242,7 +240,7 @@ class RoomCreationHandler(BaseHandler): if is_direct: content["is_direct"] = is_direct - yield room_member_handler.update_membership( + yield self.room_member_handler.update_membership( requester, UserID.from_string(invitee), room_id, @@ -280,7 +278,6 @@ class RoomCreationHandler(BaseHandler): self, creator, # A Requester object. 
room_id,
-        room_member_handler,
        preset_config,
        invite_list,
        initial_state,
@@ -325,7 +322,7 @@ class RoomCreationHandler(BaseHandler):
            content=creation_content,
        )

-        yield room_member_handler.update_membership(
+        yield self.room_member_handler.update_membership(
            creator,
            creator.user,
            room_id,
-- 
cgit 1.4.1


From e1948175ee7fc469c985b58a01ecc2eb577e5e0a Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Thu, 25 Oct 2018 17:50:06 +0100
Subject: Allow power_level_content_override=None for _send_events_for_new_room

---
 synapse/handlers/room.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index d03d2cd7be..d42c2c41c4 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -204,7 +204,7 @@ class RoomCreationHandler(BaseHandler):
            initial_state=initial_state,
            creation_content=creation_content,
            room_alias=room_alias,
-            power_level_content_override=config.get("power_level_content_override", {}),
+            power_level_content_override=config.get("power_level_content_override"),
            creator_join_profile=creator_join_profile,
        )

@@ -282,9 +282,9 @@ class RoomCreationHandler(BaseHandler):
        invite_list,
        initial_state,
        creation_content,
-        room_alias,
-        power_level_content_override,
-        creator_join_profile,
+        room_alias=None,
+        power_level_content_override=None,
+        creator_join_profile=None,
    ):
        def create(etype, content, **kwargs):
            e = {
@@ -364,7 +364,8 @@ class RoomCreationHandler(BaseHandler):
        for invitee in invite_list:
            power_level_content["users"][invitee] = 100

-        power_level_content.update(power_level_content_override)
+        if power_level_content_override:
+            power_level_content.update(power_level_content_override)

        yield send(
            etype=EventTypes.PowerLevels,
-- 
cgit 1.4.1


From 0f7d1c99061075fe54a37cfe785184f095addf78 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Wed, 22 Aug 2018 10:57:54 +0100
Subject: Basic initial support for room upgrades

Currently just creates a new, empty room, and sends a tombstone in the old
room.
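As a rough sketch of the event shapes this patch introduces (the field names
are taken from the diff below; the ids and values are hypothetical, for
illustration only):

    # Hypothetical ids, for illustration.
    old_room_id = "!old:example.org"
    new_room_id = "!new:example.org"
    tombstone_event_id = "$tombstone_event"

    # Content of the m.room.tombstone state event sent into the old room
    # by upgrade_room(), pointing members at the replacement room.
    tombstone_content = {
        "body": "This room has been replaced",
        "replacement_room": new_room_id,
    }

    # Extra keys merged into the new room's m.room.create content by the
    # cloning step, linking the new room back to its predecessor.
    creation_content = {
        "room_version": "1",  # stands in for the requested new_version
        "predecessor": {
            "room_id": old_room_id,
            "event_id": tombstone_event_id,
        },
    }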
--- synapse/api/constants.py | 1 + synapse/handlers/room.py | 121 +++++++++++++++++++++ synapse/rest/__init__.py | 2 + .../client/v2_alpha/room_upgrade_rest_servlet.py | 78 +++++++++++++ synapse/server.pyi | 6 + 5 files changed, 208 insertions(+) create mode 100644 synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py diff --git a/synapse/api/constants.py b/synapse/api/constants.py index c2630c4c64..5565e516d6 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -61,6 +61,7 @@ class LoginType(object): class EventTypes(object): Member = "m.room.member" Create = "m.room.create" + Tombstone = "m.room.tombstone" JoinRules = "m.room.join_rules" PowerLevels = "m.room.power_levels" Aliases = "m.room.aliases" diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index d42c2c41c4..3cce6f6150 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -36,6 +36,7 @@ from synapse.api.errors import AuthError, Codes, StoreError, SynapseError from synapse.storage.state import StateFilter from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID from synapse.util import stringutils +from synapse.util.async_helpers import Linearizer from synapse.visibility import filter_events_for_client from ._base import BaseHandler @@ -75,6 +76,124 @@ class RoomCreationHandler(BaseHandler): self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() + # linearizer to stop two upgrades happening at once + self._upgrade_linearizer = Linearizer("room_upgrade_linearizer") + + @defer.inlineCallbacks + def upgrade_room(self, requester, old_room_id, new_version): + """Replace a room with a new room with a different version + + Args: + requester (synapse.types.Requester): the user requesting the upgrade + old_room_id (unicode): the id of the room to be replaced + new_version (unicode): the new room version to use + + Returns: + Deferred[unicode]: the new room id + """ + yield self.ratelimit(requester) + + user_id = requester.user.to_string() + + with (yield self._upgrade_linearizer.queue(old_room_id)): + # start by allocating a new room id + is_public = False # XXX fixme + new_room_id = yield self._generate_room_id( + creator_id=user_id, is_public=is_public, + ) + + # we create and auth the tombstone event before properly creating the new + # room, to check our user has perms in the old room. 
+            tombstone_event, tombstone_context = (
+                yield self.event_creation_handler.create_event(
+                    requester, {
+                        "type": EventTypes.Tombstone,
+                        "state_key": "",
+                        "room_id": old_room_id,
+                        "sender": user_id,
+                        "content": {
+                            "body": "This room has been replaced",
+                            "replacement_room": new_room_id,
+                        }
+                    },
+                    token_id=requester.access_token_id,
+                )
+            )
+            yield self.auth.check_from_context(tombstone_event, tombstone_context)
+
+            yield self.clone_existing_room(
+                requester,
+                old_room_id=old_room_id,
+                new_room_id=new_room_id,
+                new_room_version=new_version,
+                tombstone_event_id=tombstone_event.event_id,
+            )
+
+            # now send the tombstone
+            yield self.event_creation_handler.send_nonmember_event(
+                requester, tombstone_event, tombstone_context,
+            )
+
+            # XXX send a power_levels in the old room, if possible
+
+            defer.returnValue(new_room_id)
+
+    @defer.inlineCallbacks
+    def clone_existing_room(
+            self, requester, old_room_id, new_room_id, new_room_version,
+            tombstone_event_id,
+    ):
+        """Populate a new room based on an old room
+
+        Args:
+            requester (synapse.types.Requester): the user requesting the upgrade
+            old_room_id (unicode): the id of the room to be replaced
+            new_room_id (unicode): the id to give the new room (should already have been
+                created with _generate_room_id())
+            new_room_version (unicode): the new room version to use
+            tombstone_event_id (unicode|str): the ID of the tombstone event in the old
+                room.
+        Returns:
+            Deferred[None]
+        """
+        user_id = requester.user.to_string()
+
+        if not self.spam_checker.user_may_create_room(user_id):
+            raise SynapseError(403, "You are not permitted to create rooms")
+
+        # XXX check alias is free
+        # canonical_alias = None
+
+        # XXX create association in directory handler
+        # XXX preset
+
+        preset_config = RoomCreationPreset.PRIVATE_CHAT
+
+        creation_content = {
+            "room_version": new_room_version,
+            "predecessor": {
+                "room_id": old_room_id,
+                "event_id": tombstone_event_id,
+            }
+        }
+
+        initial_state = OrderedDict()
+
+        yield self._send_events_for_new_room(
+            requester,
+            new_room_id,
+            preset_config=preset_config,
+            invite_list=[],
+            initial_state=initial_state,
+            creation_content=creation_content,
+        )
+
+        # XXX name
+        # XXX topic
+        # XXX invites/joins
+        # XXX 3pid invites
+        # XXX directory_handler.send_room_alias_update_event
+
    @defer.inlineCallbacks
    def create_room(self, requester, config, ratelimit=True,
                    creator_join_profile=None):
@@ -416,6 +535,8 @@ class RoomCreationHandler(BaseHandler):
                    random_string,
                    self.hs.hostname,
                ).to_string()
+                if isinstance(gen_room_id, bytes):
+                    gen_room_id = gen_room_id.decode('utf-8')
                yield self.store.store_room(
                    room_id=gen_room_id,
                    room_creator_user_id=creator_id,
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index 4856822a5d..5f35c2d1be 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -47,6 +47,7 @@ from synapse.rest.client.v2_alpha import (
    register,
    report_event,
    room_keys,
+    room_upgrade_rest_servlet,
    sendtodevice,
    sync,
    tags,
@@ -116,3 +117,4 @@ class ClientRestResource(JsonResource):
        sendtodevice.register_servlets(hs, client_resource)
        user_directory.register_servlets(hs, client_resource)
        groups.register_servlets(hs, client_resource)
+        room_upgrade_rest_servlet.register_servlets(hs, client_resource)
diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
new file mode 100644
index 0000000000..1b195f90c4
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from twisted.internet import defer
+
+from synapse.api.constants import KNOWN_ROOM_VERSIONS
+from synapse.api.errors import Codes, SynapseError
+from synapse.http.servlet import (
+    RestServlet,
+    assert_params_in_dict,
+    parse_json_object_from_request,
+)
+
+from ._base import client_v2_patterns
+
+logger = logging.getLogger(__name__)
+
+
+class RoomUpgradeRestServlet(RestServlet):
+    PATTERNS = client_v2_patterns(
+        # /rooms/$roomid/upgrade
+        "/rooms/(?P<room_id>[^/]*)/upgrade$",
+        v2_alpha=False,
+    )
+
+    def __init__(self, hs):
+        """
+
+        Args:
+            hs (synapse.server.HomeServer):
+        """
+        super(RoomUpgradeRestServlet, self).__init__()
+        self._hs = hs
+        self._room_creation_handler = hs.get_room_creation_handler()
+        self._auth = hs.get_auth()
+
+    @defer.inlineCallbacks
+    def on_POST(self, request, room_id):
+        requester = yield self._auth.get_user_by_req(request)
+
+        content = parse_json_object_from_request(request)
+        assert_params_in_dict(content, ("new_version", ))
+        new_version = content["new_version"]
+
+        if new_version not in KNOWN_ROOM_VERSIONS:
+            raise SynapseError(
+                400,
+                "Your homeserver does not support this room version",
+                Codes.UNSUPPORTED_ROOM_VERSION,
+            )
+
+        new_room_id = yield self._room_creation_handler.upgrade_room(
+            requester, room_id, new_version
+        )
+
+        ret = {
+            "replacement_room": new_room_id,
+        }
+
+        defer.returnValue((200, ret))
+
+
+def register_servlets(hs, http_server):
+    RoomUpgradeRestServlet(hs).register(http_server)
diff --git a/synapse/server.pyi b/synapse/server.pyi
index ce28486233..06cd083a74 100644
--- a/synapse/server.pyi
+++ b/synapse/server.pyi
@@ -7,6 +7,9 @@ import synapse.handlers.auth
 import synapse.handlers.deactivate_account
 import synapse.handlers.device
 import synapse.handlers.e2e_keys
+import synapse.handlers.room
+import synapse.handlers.room_member
+import synapse.handlers.message
 import synapse.handlers.set_password
 import synapse.rest.media.v1.media_repository
 import synapse.server_notices.server_notices_manager
@@ -50,6 +53,9 @@ class HomeServer(object):
    def get_room_creation_handler(self) -> synapse.handlers.room.RoomCreationHandler:
        pass

+    def get_room_member_handler(self) -> synapse.handlers.room_member.RoomMemberHandler:
+        pass
+
    def get_event_creation_handler(self) -> synapse.handlers.message.EventCreationHandler:
        pass
-- 
cgit 1.4.1


From 4cda300058ba68f97c032923ebf429f437eddd8e Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Fri, 12 Oct 2018 11:13:40 +0100
Subject: preserve room visibility

---
 synapse/handlers/room.py | 8 +++++---
 synapse/storage/room.py  | 2 +-
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 3cce6f6150..2f9eb8ef4c 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -32,7 +32,7 @@ from synapse.api.constants import (
    JoinRules,
    RoomCreationPreset,
 )
-from synapse.api.errors import AuthError, Codes,
StoreError, SynapseError +from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError from synapse.storage.state import StateFilter from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID from synapse.util import stringutils @@ -97,9 +97,11 @@ class RoomCreationHandler(BaseHandler): with (yield self._upgrade_linearizer.queue(old_room_id)): # start by allocating a new room id - is_public = False # XXX fixme + r = yield self.store.get_room(old_room_id) + if r is None: + raise NotFoundError("Unknown room id %s" % (old_room_id,)) new_room_id = yield self._generate_room_id( - creator_id=user_id, is_public=is_public, + creator_id=user_id, is_public=r["is_public"], ) # we create and auth the tombstone event before properly creating the new diff --git a/synapse/storage/room.py b/synapse/storage/room.py index 61013b8919..41c65e112a 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -47,7 +47,7 @@ class RoomWorkerStore(SQLBaseStore): Args: room_id (str): The ID of the room to retrieve. Returns: - A namedtuple containing the room information, or an empty list. + A dict containing the room information, or None if the room is unknown. """ return self._simple_select_one( table="rooms", -- cgit 1.4.1 From 1b9f253e208ea3a471594bde52366e3abf54fc1a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 12 Oct 2018 12:05:18 +0100 Subject: preserve PLs --- synapse/handlers/room.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 2f9eb8ef4c..40ca12f1b7 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -179,7 +179,13 @@ class RoomCreationHandler(BaseHandler): } } - initial_state = OrderedDict() + initial_state = dict() + + old_room_state_ids = yield self.store.get_current_state_ids(old_room_id) + pl_event_id = old_room_state_ids.get((EventTypes.PowerLevels, "")) + if pl_event_id: + pl_event = yield self.store.get_event(pl_event_id) + initial_state[(EventTypes.PowerLevels, "")] = pl_event.content yield self._send_events_for_new_room( requester, -- cgit 1.4.1 From 3a263bf3aec6b9709fed391671f8faec334dc739 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 12 Oct 2018 17:05:48 +0100 Subject: copy state --- synapse/handlers/room.py | 37 ++++++++++++++++++++++++++----------- 1 file changed, 26 insertions(+), 11 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 40ca12f1b7..ab92ca5e78 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -167,9 +167,6 @@ class RoomCreationHandler(BaseHandler): # canonical_alias = None # XXX create association in directory handler - # XXX preset - - preset_config = RoomCreationPreset.PRIVATE_CHAT creation_content = { "room_version": new_room_version, @@ -181,23 +178,41 @@ class RoomCreationHandler(BaseHandler): initial_state = dict() - old_room_state_ids = yield self.store.get_current_state_ids(old_room_id) - pl_event_id = old_room_state_ids.get((EventTypes.PowerLevels, "")) - if pl_event_id: - pl_event = yield self.store.get_event(pl_event_id) - initial_state[(EventTypes.PowerLevels, "")] = pl_event.content + types_to_copy = ( + (EventTypes.PowerLevels, ""), + (EventTypes.JoinRules, ""), + (EventTypes.Name, ""), + (EventTypes.Topic, ""), + (EventTypes.RoomHistoryVisibility, ""), + (EventTypes.GuestAccess, "") + ) + + old_room_state_ids = yield self.store.get_filtered_current_state_ids( + old_room_id, StateFilter.from_types(types_to_copy), + 
) + # map from event_id to BaseEvent + old_room_state_events = yield self.store.get_events(old_room_state_ids.values()) + + for k in types_to_copy: + old_event_id = old_room_state_ids.get(k) + if old_event_id: + old_event = old_room_state_events.get(old_event_id) + if old_event: + initial_state[k] = old_event.content yield self._send_events_for_new_room( requester, new_room_id, - preset_config=preset_config, + + # we expect to override all the presets with initial_state, so this is + # somewhat arbitrary. + preset_config=RoomCreationPreset.PRIVATE_CHAT, + invite_list=[], initial_state=initial_state, creation_content=creation_content, ) - # XXX name - # XXX topic # XXX invites/joins # XXX 3pid invites # XXX directory_handler.send_room_alias_update_event -- cgit 1.4.1 From e6babc27d51c3de04cdaedc40439b7ddb56b2e12 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 24 Oct 2018 23:14:36 +0100 Subject: restrict PLs in old room --- synapse/handlers/room.py | 44 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index ab92ca5e78..d016f0e8b8 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -136,7 +136,49 @@ class RoomCreationHandler(BaseHandler): requester, tombstone_event, tombstone_context, ) - # XXX send a power_levels in the old room, if possible + # ... and restrict the PLs in the old room, if possible. + old_room_pl_state = yield self.state_handler.get_current_state( + old_room_id, + event_type=EventTypes.PowerLevels, + latest_event_ids=(tombstone_event.event_id, ), + ) + + if old_room_pl_state is None: + logger.warning( + "Not supported: upgrading a room with no PL event. Not setting PLs " + "in old room.", + ) + else: + pl_content = dict(old_room_pl_state.content) + users_default = int(pl_content.get("users_default", 0)) + restricted_level = max(users_default + 1, 50) + + updated = False + for v in ("invite", "events_default"): + current = int(pl_content.get(v, 0)) + if current < restricted_level: + logger.debug( + "Setting level for %s in %s to %i (was %i)", + v, old_room_id, restricted_level, current, + ) + pl_content[v] = restricted_level + updated = True + else: + logger.debug( + "Not setting level for %s (already %i)", + v, current, + ) + + if updated: + yield self.event_creation_handler.create_and_send_nonmember_event( + requester, { + "type": EventTypes.PowerLevels, + "state_key": '', + "room_id": old_room_id, + "sender": user_id, + "content": pl_content, + }, ratelimit=False, + ) defer.returnValue(new_room_id) -- cgit 1.4.1 From 68c0ce62d839fe3a2014c1df32b761eeb0916b9c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 25 Oct 2018 18:17:00 +0100 Subject: changelog --- changelog.d/4091.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4091.feature diff --git a/changelog.d/4091.feature b/changelog.d/4091.feature new file mode 100644 index 0000000000..a3f7dbdcdd --- /dev/null +++ b/changelog.d/4091.feature @@ -0,0 +1 @@ +Support for replacing rooms with new ones -- cgit 1.4.1 From 474810d9d545c07cfac567be7b29100a66cb2b7c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 25 Oct 2018 23:15:03 +0100 Subject: fix broken test This test stubbed out some stuff in a very weird way. I have no idea why. It broke. 
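The next patch in this series ports register_new_matrix_user to Python 3. As a
minimal sketch of the shared-secret registration MAC that script computes (the
NUL-separated HMAC-SHA1 construction is taken from that diff; the secret,
nonce and credentials here are made up for illustration):

    import hashlib
    import hmac

    # Hypothetical inputs; the nonce normally comes from an initial
    # GET to /_matrix/client/r0/admin/register.
    shared_secret = "registration_shared_secret from homeserver.yaml"
    nonce = "abcd1234"
    user, password, admin = "user", "pass", False

    # HMAC-SHA1 over nonce, username, password and the admin flag,
    # each separated by a NUL byte, keyed with the shared secret.
    mac = hmac.new(key=shared_secret.encode("utf8"), digestmod=hashlib.sha1)
    for part in (nonce, user, password):
        mac.update(part.encode("utf8"))
        mac.update(b"\x00")
    mac.update(b"admin" if admin else b"notadmin")

    print(mac.hexdigest())  # 40-character hex MAC, as checked by the new tests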
--- tests/server_notices/test_resource_limits_server_notices.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index 4701eedd45..b1551df7ca 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -4,7 +4,6 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, ServerNoticeMsgType from synapse.api.errors import ResourceLimitError -from synapse.handlers.auth import AuthHandler from synapse.server_notices.resource_limits_server_notices import ( ResourceLimitsServerNotices, ) @@ -13,17 +12,10 @@ from tests import unittest from tests.utils import setup_test_homeserver -class AuthHandlers(object): - def __init__(self, hs): - self.auth_handler = AuthHandler(hs) - - class TestResourceLimitsServerNotices(unittest.TestCase): @defer.inlineCallbacks def setUp(self): - self.hs = yield setup_test_homeserver(self.addCleanup, handlers=None) - self.hs.handlers = AuthHandlers(self.hs) - self.auth_handler = self.hs.handlers.auth_handler + self.hs = yield setup_test_homeserver(self.addCleanup) self.server_notices_sender = self.hs.get_server_notices_sender() # relying on [1] is far from ideal, but the only case where -- cgit 1.4.1 From 77d70a7646bd30315393b9aec44e452d5739a266 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 26 Oct 2018 22:05:22 +1100 Subject: Port register_new_matrix_user to Python 3 and add tests (#4085) --- changelog.d/4085.feature | 1 + scripts/register_new_matrix_user | 204 +------------------------ synapse/_scripts/__init__.py | 0 synapse/_scripts/register_new_matrix_user.py | 215 +++++++++++++++++++++++++++ tests/scripts/__init__.py | 0 tests/scripts/test_new_matrix_user.py | 160 ++++++++++++++++++++ 6 files changed, 378 insertions(+), 202 deletions(-) create mode 100644 changelog.d/4085.feature create mode 100644 synapse/_scripts/__init__.py create mode 100644 synapse/_scripts/register_new_matrix_user.py create mode 100644 tests/scripts/__init__.py create mode 100644 tests/scripts/test_new_matrix_user.py diff --git a/changelog.d/4085.feature b/changelog.d/4085.feature new file mode 100644 index 0000000000..4bd3ddcf2c --- /dev/null +++ b/changelog.d/4085.feature @@ -0,0 +1 @@ +The register_new_matrix_user script is now ported to Python 3. diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user index 89143c5d59..b450712ab7 100755 --- a/scripts/register_new_matrix_user +++ b/scripts/register_new_matrix_user @@ -16,207 +16,7 @@ from __future__ import print_function -import argparse -import getpass -import hashlib -import hmac -import json -import sys -import urllib2 - -from six import input - -import yaml - - -def request_registration(user, password, server_location, shared_secret, admin=False): - req = urllib2.Request( - "%s/_matrix/client/r0/admin/register" % (server_location,), - headers={'Content-Type': 'application/json'}, - ) - - try: - if sys.version_info[:3] >= (2, 7, 9): - # As of version 2.7.9, urllib2 now checks SSL certs - import ssl - - f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23)) - else: - f = urllib2.urlopen(req) - body = f.read() - f.close() - nonce = json.loads(body)["nonce"] - except urllib2.HTTPError as e: - print("ERROR! 
Received %d %s" % (e.code, e.reason)) - if 400 <= e.code < 500: - if e.info().type == "application/json": - resp = json.load(e) - if "error" in resp: - print(resp["error"]) - sys.exit(1) - - mac = hmac.new(key=shared_secret, digestmod=hashlib.sha1) - - mac.update(nonce) - mac.update("\x00") - mac.update(user) - mac.update("\x00") - mac.update(password) - mac.update("\x00") - mac.update("admin" if admin else "notadmin") - - mac = mac.hexdigest() - - data = { - "nonce": nonce, - "username": user, - "password": password, - "mac": mac, - "admin": admin, - } - - server_location = server_location.rstrip("/") - - print("Sending registration request...") - - req = urllib2.Request( - "%s/_matrix/client/r0/admin/register" % (server_location,), - data=json.dumps(data), - headers={'Content-Type': 'application/json'}, - ) - try: - if sys.version_info[:3] >= (2, 7, 9): - # As of version 2.7.9, urllib2 now checks SSL certs - import ssl - - f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23)) - else: - f = urllib2.urlopen(req) - f.read() - f.close() - print("Success.") - except urllib2.HTTPError as e: - print("ERROR! Received %d %s" % (e.code, e.reason)) - if 400 <= e.code < 500: - if e.info().type == "application/json": - resp = json.load(e) - if "error" in resp: - print(resp["error"]) - sys.exit(1) - - -def register_new_user(user, password, server_location, shared_secret, admin): - if not user: - try: - default_user = getpass.getuser() - except Exception: - default_user = None - - if default_user: - user = input("New user localpart [%s]: " % (default_user,)) - if not user: - user = default_user - else: - user = input("New user localpart: ") - - if not user: - print("Invalid user name") - sys.exit(1) - - if not password: - password = getpass.getpass("Password: ") - - if not password: - print("Password cannot be blank.") - sys.exit(1) - - confirm_password = getpass.getpass("Confirm password: ") - - if password != confirm_password: - print("Passwords do not match") - sys.exit(1) - - if admin is None: - admin = input("Make admin [no]: ") - if admin in ("y", "yes", "true"): - admin = True - else: - admin = False - - request_registration(user, password, server_location, shared_secret, bool(admin)) - +from synapse._scripts.register_new_matrix_user import main if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="Used to register new users with a given home server when" - " registration has been disabled. The home server must be" - " configured with the 'registration_shared_secret' option" - " set." - ) - parser.add_argument( - "-u", - "--user", - default=None, - help="Local part of the new user. Will prompt if omitted.", - ) - parser.add_argument( - "-p", - "--password", - default=None, - help="New password for user. Will prompt if omitted.", - ) - admin_group = parser.add_mutually_exclusive_group() - admin_group.add_argument( - "-a", - "--admin", - action="store_true", - help=( - "Register new user as an admin. " - "Will prompt if --no-admin is not set either." - ), - ) - admin_group.add_argument( - "--no-admin", - action="store_true", - help=( - "Register new user as a regular user. " - "Will prompt if --admin is not set either." - ), - ) - - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument( - "-c", - "--config", - type=argparse.FileType('r'), - help="Path to server config file. Used to read in shared secret.", - ) - - group.add_argument( - "-k", "--shared-secret", help="Shared secret as defined in server config file." 
-    )
-
-    parser.add_argument(
-        "server_url",
-        default="https://localhost:8448",
-        nargs='?',
-        help="URL to use to talk to the home server. Defaults to "
-        " 'https://localhost:8448'.",
-    )
-
-    args = parser.parse_args()
-
-    if "config" in args and args.config:
-        config = yaml.safe_load(args.config)
-        secret = config.get("registration_shared_secret", None)
-        if not secret:
-            print("No 'registration_shared_secret' defined in config.")
-            sys.exit(1)
-    else:
-        secret = args.shared_secret
-
-    admin = None
-    if args.admin or args.no_admin:
-        admin = args.admin
-
-    register_new_user(args.user, args.password, args.server_url, secret, admin)
+    main()
diff --git a/synapse/_scripts/__init__.py b/synapse/_scripts/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py
new file mode 100644
index 0000000000..70cecde486
--- /dev/null
+++ b/synapse/_scripts/register_new_matrix_user.py
@@ -0,0 +1,215 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2018 New Vector
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import argparse
+import getpass
+import hashlib
+import hmac
+import logging
+import sys
+
+from six.moves import input
+
+import requests as _requests
+import yaml
+
+
+def request_registration(
+    user,
+    password,
+    server_location,
+    shared_secret,
+    admin=False,
+    requests=_requests,
+    _print=print,
+    exit=sys.exit,
+):
+
+    url = "%s/_matrix/client/r0/admin/register" % (server_location,)
+
+    # Get the nonce
+    r = requests.get(url, verify=False)
+
+    if r.status_code != 200:
+        _print("ERROR! Received %d %s" % (r.status_code, r.reason))
+        if 400 <= r.status_code < 500:
+            try:
+                _print(r.json()["error"])
+            except Exception:
+                pass
+        return exit(1)
+
+    nonce = r.json()["nonce"]
+
+    mac = hmac.new(key=shared_secret.encode('utf8'), digestmod=hashlib.sha1)
+
+    mac.update(nonce.encode('utf8'))
+    mac.update(b"\x00")
+    mac.update(user.encode('utf8'))
+    mac.update(b"\x00")
+    mac.update(password.encode('utf8'))
+    mac.update(b"\x00")
+    mac.update(b"admin" if admin else b"notadmin")
+
+    mac = mac.hexdigest()
+
+    data = {
+        "nonce": nonce,
+        "username": user,
+        "password": password,
+        "mac": mac,
+        "admin": admin,
+    }
+
+    _print("Sending registration request...")
+    r = requests.post(url, json=data, verify=False)
+
+    if r.status_code != 200:
+        _print("ERROR!
Received %d %s" % (r.status_code, r.reason)) + if 400 <= r.status_code < 500: + try: + _print(r.json()["error"]) + except Exception: + pass + return exit(1) + + _print("Success!") + + +def register_new_user(user, password, server_location, shared_secret, admin): + if not user: + try: + default_user = getpass.getuser() + except Exception: + default_user = None + + if default_user: + user = input("New user localpart [%s]: " % (default_user,)) + if not user: + user = default_user + else: + user = input("New user localpart: ") + + if not user: + print("Invalid user name") + sys.exit(1) + + if not password: + password = getpass.getpass("Password: ") + + if not password: + print("Password cannot be blank.") + sys.exit(1) + + confirm_password = getpass.getpass("Confirm password: ") + + if password != confirm_password: + print("Passwords do not match") + sys.exit(1) + + if admin is None: + admin = input("Make admin [no]: ") + if admin in ("y", "yes", "true"): + admin = True + else: + admin = False + + request_registration(user, password, server_location, shared_secret, bool(admin)) + + +def main(): + + logging.captureWarnings(True) + + parser = argparse.ArgumentParser( + description="Used to register new users with a given home server when" + " registration has been disabled. The home server must be" + " configured with the 'registration_shared_secret' option" + " set." + ) + parser.add_argument( + "-u", + "--user", + default=None, + help="Local part of the new user. Will prompt if omitted.", + ) + parser.add_argument( + "-p", + "--password", + default=None, + help="New password for user. Will prompt if omitted.", + ) + admin_group = parser.add_mutually_exclusive_group() + admin_group.add_argument( + "-a", + "--admin", + action="store_true", + help=( + "Register new user as an admin. " + "Will prompt if --no-admin is not set either." + ), + ) + admin_group.add_argument( + "--no-admin", + action="store_true", + help=( + "Register new user as a regular user. " + "Will prompt if --admin is not set either." + ), + ) + + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument( + "-c", + "--config", + type=argparse.FileType('r'), + help="Path to server config file. Used to read in shared secret.", + ) + + group.add_argument( + "-k", "--shared-secret", help="Shared secret as defined in server config file." + ) + + parser.add_argument( + "server_url", + default="https://localhost:8448", + nargs='?', + help="URL to use to talk to the home server. 
Defaults to " + " 'https://localhost:8448'.", + ) + + args = parser.parse_args() + + if "config" in args and args.config: + config = yaml.safe_load(args.config) + secret = config.get("registration_shared_secret", None) + if not secret: + print("No 'registration_shared_secret' defined in config.") + sys.exit(1) + else: + secret = args.shared_secret + + admin = None + if args.admin or args.no_admin: + admin = args.admin + + register_new_user(args.user, args.password, args.server_url, secret, admin) + + +if __name__ == "__main__": + main() diff --git a/tests/scripts/__init__.py b/tests/scripts/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/scripts/test_new_matrix_user.py b/tests/scripts/test_new_matrix_user.py new file mode 100644 index 0000000000..6f56893f5e --- /dev/null +++ b/tests/scripts/test_new_matrix_user.py @@ -0,0 +1,160 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from mock import Mock + +from synapse._scripts.register_new_matrix_user import request_registration + +from tests.unittest import TestCase + + +class RegisterTestCase(TestCase): + def test_success(self): + """ + The script will fetch a nonce, and then generate a MAC with it, and then + post that MAC. + """ + + def get(url, verify=None): + r = Mock() + r.status_code = 200 + r.json = lambda: {"nonce": "a"} + return r + + def post(url, json=None, verify=None): + # Make sure we are sent the correct info + self.assertEqual(json["username"], "user") + self.assertEqual(json["password"], "pass") + self.assertEqual(json["nonce"], "a") + # We want a 40-char hex MAC + self.assertEqual(len(json["mac"]), 40) + + r = Mock() + r.status_code = 200 + return r + + requests = Mock() + requests.get = get + requests.post = post + + # The fake stdout will be written here + out = [] + err_code = [] + + request_registration( + "user", + "pass", + "matrix.org", + "shared", + admin=False, + requests=requests, + _print=out.append, + exit=err_code.append, + ) + + # We should get the success message making sure everything is OK. + self.assertIn("Success!", out) + + # sys.exit shouldn't have been called. + self.assertEqual(err_code, []) + + def test_failure_nonce(self): + """ + If the script fails to fetch a nonce, it throws an error and quits. + """ + + def get(url, verify=None): + r = Mock() + r.status_code = 404 + r.reason = "Not Found" + r.json = lambda: {"not": "error"} + return r + + requests = Mock() + requests.get = get + + # The fake stdout will be written here + out = [] + err_code = [] + + request_registration( + "user", + "pass", + "matrix.org", + "shared", + admin=False, + requests=requests, + _print=out.append, + exit=err_code.append, + ) + + # Exit was called + self.assertEqual(err_code, [1]) + + # We got an error message + self.assertIn("ERROR! 
Received 404 Not Found", out)
+        self.assertNotIn("Success!", out)
+
+    def test_failure_post(self):
+        """
+        The script will fetch a nonce, and then if the final POST fails, will
+        report an error and quit.
+        """
+
+        def get(url, verify=None):
+            r = Mock()
+            r.status_code = 200
+            r.json = lambda: {"nonce": "a"}
+            return r
+
+        def post(url, json=None, verify=None):
+            # Make sure we are sent the correct info
+            self.assertEqual(json["username"], "user")
+            self.assertEqual(json["password"], "pass")
+            self.assertEqual(json["nonce"], "a")
+            # We want a 40-char hex MAC
+            self.assertEqual(len(json["mac"]), 40)
+
+            r = Mock()
+            # Then 500 because we're jerks
+            r.status_code = 500
+            r.reason = "Broken"
+            return r
+
+        requests = Mock()
+        requests.get = get
+        requests.post = post
+
+        # The fake stdout will be written here
+        out = []
+        err_code = []
+
+        request_registration(
+            "user",
+            "pass",
+            "matrix.org",
+            "shared",
+            admin=False,
+            requests=requests,
+            _print=out.append,
+            exit=err_code.append,
+        )
+
+        # Exit was called
+        self.assertEqual(err_code, [1])
+
+        # We got an error message
+        self.assertIn("ERROR! Received 500 Broken", out)
+        self.assertNotIn("Success!", out)
-- 
cgit 1.4.1


From 193cadc988801d9035124d1fd3ca23607b9b1f25 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Fri, 26 Oct 2018 17:10:30 +0100
Subject: Address review comments

Improve comments, get old room state from the context we already have
---
 synapse/handlers/room.py                           | 16 +++++++++-------
 .../client/v2_alpha/room_upgrade_rest_servlet.py   | 21 ++++++++++++++++-----
 2 files changed, 25 insertions(+), 12 deletions(-)

diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index d016f0e8b8..145b5b19ee 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -136,19 +136,21 @@ class RoomCreationHandler(BaseHandler):
             requester, tombstone_event, tombstone_context,
         )
 
-        # ... and restrict the PLs in the old room, if possible.
-        old_room_pl_state = yield self.state_handler.get_current_state(
-            old_room_id,
-            event_type=EventTypes.PowerLevels,
-            latest_event_ids=(tombstone_event.event_id, ),
-        )
+        old_room_state = yield tombstone_context.get_current_state_ids(self.store)
+        old_room_pl_event_id = old_room_state.get((EventTypes.PowerLevels, ""))
 
-        if old_room_pl_state is None:
+        if old_room_pl_event_id is None:
             logger.warning(
                 "Not supported: upgrading a room with no PL event. Not setting PLs "
                 "in old room.",
             )
         else:
+            # we try to stop regular users from speaking by setting the PL required
+            # to send regular events and invites to 'Moderator' level. That's normally
+            # 50, but if the default PL in a room is 50 or more, then we set the
+            # required PL above that.
+
+            old_room_pl_state = yield self.store.get_event(old_room_pl_event_id)
             pl_content = dict(old_room_pl_state.content)
             users_default = int(pl_content.get("users_default", 0))
             restricted_level = max(users_default + 1, 50)
diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
index 1b195f90c4..e6356101fd 100644
--- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
+++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
@@ -31,6 +31,22 @@ logger = logging.getLogger(__name__)
 
 
 class RoomUpgradeRestServlet(RestServlet):
+    """Handler for room upgrade requests.
+
+    Handles requests of the form:
+
+        POST /_matrix/client/r0/rooms/$roomid/upgrade HTTP/1.1
+        Content-Type: application/json
+
+        {
+            "new_version": "2",
+        }
+
+    Creates a new room and shuts down the old one. 
Returns the ID of the new room.
+
+    Args:
+        hs (synapse.server.HomeServer):
+    """
     PATTERNS = client_v2_patterns(
         # /rooms/$roomid/upgrade
         "/rooms/(?P<room_id>[^/]*)/upgrade$",
         v2_alpha=False,
@@ -38,11 +54,6 @@ class RoomUpgradeRestServlet(RestServlet):
     )
 
     def __init__(self, hs):
-        """
-
-        Args:
-            hs (synapse.server.HomeServer):
-        """
         super(RoomUpgradeRestServlet, self).__init__()
         self._hs = hs
         self._room_creation_handler = hs.get_room_creation_handler()
-- 
cgit 1.4.1


From 54bbe71867fb3de2e3984e2b3eb909845c2448b3 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Fri, 26 Oct 2018 22:51:34 +0100
Subject: optimise state copying

---
 synapse/handlers/room.py | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 145b5b19ee..8e48c1ca6a 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -21,7 +21,7 @@ import math
 import string
 from collections import OrderedDict
 
-from six import string_types
+from six import iteritems, string_types
 
 from twisted.internet import defer
 
@@ -237,12 +237,10 @@ class RoomCreationHandler(BaseHandler):
         # map from event_id to BaseEvent
         old_room_state_events = yield self.store.get_events(old_room_state_ids.values())
 
-        for k in types_to_copy:
-            old_event_id = old_room_state_ids.get(k)
-            if old_event_id:
-                old_event = old_room_state_events.get(old_event_id)
-                if old_event:
-                    initial_state[k] = old_event.content
+        for k, old_event_id in iteritems(old_room_state_ids):
+            old_event = old_room_state_events.get(old_event_id)
+            if old_event:
+                initial_state[k] = old_event.content
 
         yield self._send_events_for_new_room(
             requester,
-- 
cgit 1.4.1


From 5caf79b312947c823977c89275c1ea5750aeec92 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Fri, 26 Oct 2018 23:56:40 +0100
Subject: Remember to copy the avatar on room upgrades

---
 changelog.d/4100.feature | 1 +
 synapse/handlers/room.py | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/4100.feature

diff --git a/changelog.d/4100.feature b/changelog.d/4100.feature
new file mode 100644
index 0000000000..a3f7dbdcdd
--- /dev/null
+++ b/changelog.d/4100.feature
@@ -0,0 +1 @@
+Support for replacing rooms with new ones
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 8e48c1ca6a..c59c02527c 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -228,7 +228,8 @@ class RoomCreationHandler(BaseHandler):
             (EventTypes.Name, ""),
             (EventTypes.Topic, ""),
             (EventTypes.RoomHistoryVisibility, ""),
-            (EventTypes.GuestAccess, "")
+            (EventTypes.GuestAccess, ""),
+            (EventTypes.RoomAvatar, ""),
         )
 
         old_room_state_ids = yield self.store.get_filtered_current_state_ids(
-- 
cgit 1.4.1


From db24d7f15e406390d57b23d48a78fa33604a47e7 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Fri, 26 Oct 2018 23:47:37 +0100
Subject: Better handling of odd PLs during room upgrades

Fixes handling of rooms where we have permission to send the tombstone,
but not other state. We need to (a) fail more gracefully when we can't
send the PLs in the old room, and (b) not set the PLs in the new room
until we are done with the other stuff.
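For reference, the power-level restriction discussed here reduces to a small, self-contained computation. The sketch below is illustrative only and not part of the patch: it assumes plain dicts standing in for m.room.power_levels event content, and the helper name restrict_power_levels is invented for this example.

    def restrict_power_levels(pl_content):
        """Return a copy of power_levels content with 'invite' and
        'events_default' raised so regular users can no longer speak."""
        pl_content = dict(pl_content)
        users_default = int(pl_content.get("users_default", 0))
        # 'Moderator' is normally PL 50, but if the room's default user PL
        # is already 50 or more, the bar has to sit above that default.
        restricted_level = max(users_default + 1, 50)
        for key in ("invite", "events_default"):
            if int(pl_content.get(key, 0)) < restricted_level:
                pl_content[key] = restricted_level
        return pl_content

    # e.g. a room whose users_default is already at 'Moderator' level:
    print(restrict_power_levels({"users_default": 50}))
    # -> {'users_default': 50, 'invite': 51, 'events_default': 51}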
--- changelog.d/4099.feature | 1 + synapse/handlers/room.py | 125 ++++++++++++++++++++++++++++++----------------- 2 files changed, 82 insertions(+), 44 deletions(-) create mode 100644 changelog.d/4099.feature diff --git a/changelog.d/4099.feature b/changelog.d/4099.feature new file mode 100644 index 0000000000..a3f7dbdcdd --- /dev/null +++ b/changelog.d/4099.feature @@ -0,0 +1 @@ +Support for replacing rooms with new ones diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 8e48c1ca6a..70085db625 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -136,53 +136,91 @@ class RoomCreationHandler(BaseHandler): requester, tombstone_event, tombstone_context, ) + # and finally, shut down the PLs in the old room, and update them in the new + # room. old_room_state = yield tombstone_context.get_current_state_ids(self.store) - old_room_pl_event_id = old_room_state.get((EventTypes.PowerLevels, "")) - if old_room_pl_event_id is None: - logger.warning( - "Not supported: upgrading a room with no PL event. Not setting PLs " - "in old room.", + yield self._update_upgraded_room_pls( + requester, old_room_id, new_room_id, old_room_state, + ) + + defer.returnValue(new_room_id) + + @defer.inlineCallbacks + def _update_upgraded_room_pls( + self, requester, old_room_id, new_room_id, old_room_state, + ): + """Send updated power levels in both rooms after an upgrade + + Args: + requester (synapse.types.Requester): the user requesting the upgrade + old_room_id (unicode): the id of the room to be replaced + new_room_id (unicode): the id of the replacement room + old_room_state (dict[tuple[str, str], str]): the state map for the old room + + Returns: + Deferred + """ + old_room_pl_event_id = old_room_state.get((EventTypes.PowerLevels, "")) + + if old_room_pl_event_id is None: + logger.warning( + "Not supported: upgrading a room with no PL event. Not setting PLs " + "in old room.", + ) + return + + old_room_pl_state = yield self.store.get_event(old_room_pl_event_id) + + # we try to stop regular users from speaking by setting the PL required + # to send regular events and invites to 'Moderator' level. That's normally + # 50, but if the default PL in a room is 50 or more, then we set the + # required PL above that. + + pl_content = dict(old_room_pl_state.content) + users_default = int(pl_content.get("users_default", 0)) + restricted_level = max(users_default + 1, 50) + + updated = False + for v in ("invite", "events_default"): + current = int(pl_content.get(v, 0)) + if current < restricted_level: + logger.info( + "Setting level for %s in %s to %i (was %i)", + v, old_room_id, restricted_level, current, ) + pl_content[v] = restricted_level + updated = True else: - # we try to stop regular users from speaking by setting the PL required - # to send regular events and invites to 'Moderator' level. That's normally - # 50, but if the default PL in a room is 50 or more, then we set the - # required PL above that. 
- - old_room_pl_state = yield self.store.get_event(old_room_pl_event_id) - pl_content = dict(old_room_pl_state.content) - users_default = int(pl_content.get("users_default", 0)) - restricted_level = max(users_default + 1, 50) - - updated = False - for v in ("invite", "events_default"): - current = int(pl_content.get(v, 0)) - if current < restricted_level: - logger.debug( - "Setting level for %s in %s to %i (was %i)", - v, old_room_id, restricted_level, current, - ) - pl_content[v] = restricted_level - updated = True - else: - logger.debug( - "Not setting level for %s (already %i)", - v, current, - ) - - if updated: - yield self.event_creation_handler.create_and_send_nonmember_event( - requester, { - "type": EventTypes.PowerLevels, - "state_key": '', - "room_id": old_room_id, - "sender": user_id, - "content": pl_content, - }, ratelimit=False, - ) - - defer.returnValue(new_room_id) + logger.info( + "Not setting level for %s (already %i)", + v, current, + ) + + if updated: + try: + yield self.event_creation_handler.create_and_send_nonmember_event( + requester, { + "type": EventTypes.PowerLevels, + "state_key": '', + "room_id": old_room_id, + "sender": requester.user.to_string(), + "content": pl_content, + }, ratelimit=False, + ) + except AuthError as e: + logger.warning("Unable to update PLs in old room: %s", e) + + logger.info("Setting correct PLs in new room") + yield self.event_creation_handler.create_and_send_nonmember_event( + requester, { + "type": EventTypes.PowerLevels, + "state_key": '', + "room_id": new_room_id, + "sender": requester.user.to_string(), + "content": old_room_pl_state.content, + }, ratelimit=False, + ) @defer.inlineCallbacks def clone_exiting_room( @@ -223,7 +261,6 @@ class RoomCreationHandler(BaseHandler): initial_state = dict() types_to_copy = ( - (EventTypes.PowerLevels, ""), (EventTypes.JoinRules, ""), (EventTypes.Name, ""), (EventTypes.Topic, ""), -- cgit 1.4.1 From 2b791865c454f3bd08aa16431ef506305f712609 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 29 Oct 2018 21:52:52 +1100 Subject: version bump --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index 1ddbbbebfb..c8377df7b2 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.33.7" +__version__ = "0.33.8rc1" -- cgit 1.4.1 From 3b0a85fc8ec869a559fbbdb818f254a1a26c9668 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 29 Oct 2018 21:54:01 +1100 Subject: changelog --- CHANGES.md | 47 +++++++++++++++++++++++++++++++++++++++++++++++ changelog.d/3698.misc | 1 - changelog.d/3786.misc | 1 - changelog.d/3969.bugfix | 1 - changelog.d/3975.feature | 1 - changelog.d/4011.misc | 1 - changelog.d/4031.misc | 1 - changelog.d/4041.misc | 1 - changelog.d/4046.bugfix | 1 - changelog.d/4049.misc | 1 - changelog.d/4050.bugfix | 1 - changelog.d/4051.feature | 1 - changelog.d/4057.bugfix | 1 - changelog.d/4060.bugfix | 1 - changelog.d/4061.bugfix | 1 - changelog.d/4063.misc | 1 - changelog.d/4067.bugfix | 1 - changelog.d/4068.bugfix | 1 - changelog.d/4068.misc | 1 - changelog.d/4072.misc | 1 - changelog.d/4073.misc | 1 - changelog.d/4074.bugfix | 1 - changelog.d/4075.misc | 1 - changelog.d/4076.misc | 1 - changelog.d/4077.misc | 1 - changelog.d/4081.bugfix | 2 -- changelog.d/4082.misc | 1 - changelog.d/4083.bugfix | 1 - changelog.d/4085.feature | 1 - changelog.d/4089.feature | 1 - 30 files changed, 47 insertions(+), 30 deletions(-) delete mode 100644 
changelog.d/3698.misc delete mode 100644 changelog.d/3786.misc delete mode 100644 changelog.d/3969.bugfix delete mode 100644 changelog.d/3975.feature delete mode 100644 changelog.d/4011.misc delete mode 100644 changelog.d/4031.misc delete mode 100644 changelog.d/4041.misc delete mode 100644 changelog.d/4046.bugfix delete mode 100644 changelog.d/4049.misc delete mode 100644 changelog.d/4050.bugfix delete mode 100644 changelog.d/4051.feature delete mode 100644 changelog.d/4057.bugfix delete mode 100644 changelog.d/4060.bugfix delete mode 100644 changelog.d/4061.bugfix delete mode 100644 changelog.d/4063.misc delete mode 100644 changelog.d/4067.bugfix delete mode 100644 changelog.d/4068.bugfix delete mode 100644 changelog.d/4068.misc delete mode 100644 changelog.d/4072.misc delete mode 100644 changelog.d/4073.misc delete mode 100644 changelog.d/4074.bugfix delete mode 100644 changelog.d/4075.misc delete mode 100644 changelog.d/4076.misc delete mode 100644 changelog.d/4077.misc delete mode 100644 changelog.d/4081.bugfix delete mode 100644 changelog.d/4082.misc delete mode 100644 changelog.d/4083.bugfix delete mode 100644 changelog.d/4085.feature delete mode 100644 changelog.d/4089.feature diff --git a/CHANGES.md b/CHANGES.md index fb98c934c0..dc2a745c8b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,50 @@ +Synapse 0.33.8rc1 (2018-10-29) +============================== + +Features +-------- + +- Servers with auto-join rooms will now automatically create those rooms when the first user registers ([\#3975](https://github.com/matrix-org/synapse/issues/3975)) +- Add config option to control alias creation ([\#4051](https://github.com/matrix-org/synapse/issues/4051)) +- The register_new_matrix_user script is now ported to Python 3. ([\#4085](https://github.com/matrix-org/synapse/issues/4085)) +- Configure Docker image to listen on both ipv4 and ipv6. ([\#4089](https://github.com/matrix-org/synapse/issues/4089)) + + +Bugfixes +-------- + +- Fix HTTP error response codes for federated group requests. ([\#3969](https://github.com/matrix-org/synapse/issues/3969)) +- Fix issue where Python 3 users couldn't paginate /publicRooms ([\#4046](https://github.com/matrix-org/synapse/issues/4046)) +- Fix URL previewing to work in Python 3.7 ([\#4050](https://github.com/matrix-org/synapse/issues/4050)) +- synctl will use the right python executable to run worker processes ([\#4057](https://github.com/matrix-org/synapse/issues/4057)) +- Manhole now works again on Python 3, instead of failing with a "couldn't match all kex parts" when connecting. ([\#4060](https://github.com/matrix-org/synapse/issues/4060), [\#4067](https://github.com/matrix-org/synapse/issues/4067)) +- Fix some metrics being racy and causing exceptions when polled by Prometheus. ([\#4061](https://github.com/matrix-org/synapse/issues/4061)) +- Fix bug which prevented email notifications from being sent unless an absolute path was given for `email_templates`. 
([\#4068](https://github.com/matrix-org/synapse/issues/4068))
+- Correctly account for cpu usage by background threads ([\#4074](https://github.com/matrix-org/synapse/issues/4074))
+- Fix race condition where config defined reserved users were not being added to
+  the monthly active user list prior to the homeserver reactor firing up ([\#4081](https://github.com/matrix-org/synapse/issues/4081))
+- Fix bug which prevented backslashes being used in event field filters ([\#4083](https://github.com/matrix-org/synapse/issues/4083))
+
+
+Internal Changes
+----------------
+
+- Add information about the [matrix-docker-ansible-deploy](https://github.com/spantaleev/matrix-docker-ansible-deploy) playbook ([\#3698](https://github.com/matrix-org/synapse/issues/3698))
+- Add initial implementation of new state resolution algorithm ([\#3786](https://github.com/matrix-org/synapse/issues/3786))
+- Reduce database load when fetching state groups ([\#4011](https://github.com/matrix-org/synapse/issues/4011))
+- Various cleanups in the federation client code ([\#4031](https://github.com/matrix-org/synapse/issues/4031))
+- Run the CircleCI builds in docker containers ([\#4041](https://github.com/matrix-org/synapse/issues/4041))
+- Only colourise synctl output when attached to tty ([\#4049](https://github.com/matrix-org/synapse/issues/4049))
+- Refactor room alias creation code ([\#4063](https://github.com/matrix-org/synapse/issues/4063))
+- Make the Python scripts in the top-level scripts folders meet pep8 and pass flake8. ([\#4068](https://github.com/matrix-org/synapse/issues/4068))
+- The README now contains an example for the Caddy web server. Contributed by steamp0rt. ([\#4072](https://github.com/matrix-org/synapse/issues/4072))
+- Add psutil as an explicit dependency ([\#4073](https://github.com/matrix-org/synapse/issues/4073))
+- Clean up threading and logcontexts in pushers ([\#4075](https://github.com/matrix-org/synapse/issues/4075))
+- Correctly manage logcontexts during startup to fix some "Unexpected logging context" warnings ([\#4076](https://github.com/matrix-org/synapse/issues/4076))
+- Give some more things logcontexts ([\#4077](https://github.com/matrix-org/synapse/issues/4077))
+- Clean up some bits of code which were flagged by the linter ([\#4082](https://github.com/matrix-org/synapse/issues/4082))
+
+
 Synapse 0.33.7 (2018-10-18)
 ===========================
 
diff --git a/changelog.d/3698.misc b/changelog.d/3698.misc
deleted file mode 100644
index 12537e76f2..0000000000
--- a/changelog.d/3698.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add information about the [matrix-docker-ansible-deploy](https://github.com/spantaleev/matrix-docker-ansible-deploy) playbook
diff --git a/changelog.d/3786.misc b/changelog.d/3786.misc
deleted file mode 100644
index a9f9a2bb27..0000000000
--- a/changelog.d/3786.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add initial implementation of new state resolution algorithm
diff --git a/changelog.d/3969.bugfix b/changelog.d/3969.bugfix
deleted file mode 100644
index ca2759e91e..0000000000
--- a/changelog.d/3969.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix HTTP error response codes for federated group requests. 
diff --git a/changelog.d/3975.feature b/changelog.d/3975.feature deleted file mode 100644 index 162f30a532..0000000000 --- a/changelog.d/3975.feature +++ /dev/null @@ -1 +0,0 @@ -Servers with auto-join rooms will now automatically create those rooms when the first user registers diff --git a/changelog.d/4011.misc b/changelog.d/4011.misc deleted file mode 100644 index ad7768c4cd..0000000000 --- a/changelog.d/4011.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce database load when fetching state groups diff --git a/changelog.d/4031.misc b/changelog.d/4031.misc deleted file mode 100644 index 60be8b59fd..0000000000 --- a/changelog.d/4031.misc +++ /dev/null @@ -1 +0,0 @@ -Various cleanups in the federation client code diff --git a/changelog.d/4041.misc b/changelog.d/4041.misc deleted file mode 100644 index 8cce9daac9..0000000000 --- a/changelog.d/4041.misc +++ /dev/null @@ -1 +0,0 @@ -Run the CircleCI builds in docker containers diff --git a/changelog.d/4046.bugfix b/changelog.d/4046.bugfix deleted file mode 100644 index 5046dd1ce3..0000000000 --- a/changelog.d/4046.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix issue where Python 3 users couldn't paginate /publicRooms diff --git a/changelog.d/4049.misc b/changelog.d/4049.misc deleted file mode 100644 index 4370d9dfa6..0000000000 --- a/changelog.d/4049.misc +++ /dev/null @@ -1 +0,0 @@ -Only colourise synctl output when attached to tty diff --git a/changelog.d/4050.bugfix b/changelog.d/4050.bugfix deleted file mode 100644 index 3d1f6af847..0000000000 --- a/changelog.d/4050.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix URL priewing to work in Python 3.7 diff --git a/changelog.d/4051.feature b/changelog.d/4051.feature deleted file mode 100644 index 9c1b3a72a0..0000000000 --- a/changelog.d/4051.feature +++ /dev/null @@ -1 +0,0 @@ -Add config option to control alias creation diff --git a/changelog.d/4057.bugfix b/changelog.d/4057.bugfix deleted file mode 100644 index 7577731255..0000000000 --- a/changelog.d/4057.bugfix +++ /dev/null @@ -1 +0,0 @@ -synctl will use the right python executable to run worker processes \ No newline at end of file diff --git a/changelog.d/4060.bugfix b/changelog.d/4060.bugfix deleted file mode 100644 index 78d69a8819..0000000000 --- a/changelog.d/4060.bugfix +++ /dev/null @@ -1 +0,0 @@ -Manhole now works again on Python 3, instead of failing with a "couldn't match all kex parts" when connecting. diff --git a/changelog.d/4061.bugfix b/changelog.d/4061.bugfix deleted file mode 100644 index 94ffcf7a51..0000000000 --- a/changelog.d/4061.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix some metrics being racy and causing exceptions when polled by Prometheus. diff --git a/changelog.d/4063.misc b/changelog.d/4063.misc deleted file mode 100644 index 677fcb90ad..0000000000 --- a/changelog.d/4063.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor room alias creation code diff --git a/changelog.d/4067.bugfix b/changelog.d/4067.bugfix deleted file mode 100644 index 78d69a8819..0000000000 --- a/changelog.d/4067.bugfix +++ /dev/null @@ -1 +0,0 @@ -Manhole now works again on Python 3, instead of failing with a "couldn't match all kex parts" when connecting. diff --git a/changelog.d/4068.bugfix b/changelog.d/4068.bugfix deleted file mode 100644 index 74bda7491f..0000000000 --- a/changelog.d/4068.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug which prevented email notifications from being sent unless an absolute path was given for `email_templates`. 
\ No newline at end of file diff --git a/changelog.d/4068.misc b/changelog.d/4068.misc deleted file mode 100644 index db6c4ade59..0000000000 --- a/changelog.d/4068.misc +++ /dev/null @@ -1 +0,0 @@ -Make the Python scripts in the top-level scripts folders meet pep8 and pass flake8. diff --git a/changelog.d/4072.misc b/changelog.d/4072.misc deleted file mode 100644 index 9d7279fd2b..0000000000 --- a/changelog.d/4072.misc +++ /dev/null @@ -1 +0,0 @@ -The README now contains example for the Caddy web server. Contributed by steamp0rt. diff --git a/changelog.d/4073.misc b/changelog.d/4073.misc deleted file mode 100644 index fc304bef06..0000000000 --- a/changelog.d/4073.misc +++ /dev/null @@ -1 +0,0 @@ -Add psutil as an explicit dependency diff --git a/changelog.d/4074.bugfix b/changelog.d/4074.bugfix deleted file mode 100644 index b3b6b00243..0000000000 --- a/changelog.d/4074.bugfix +++ /dev/null @@ -1 +0,0 @@ -Correctly account for cpu usage by background threads diff --git a/changelog.d/4075.misc b/changelog.d/4075.misc deleted file mode 100644 index d08b8cc271..0000000000 --- a/changelog.d/4075.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up threading and logcontexts in pushers \ No newline at end of file diff --git a/changelog.d/4076.misc b/changelog.d/4076.misc deleted file mode 100644 index 9dd000decf..0000000000 --- a/changelog.d/4076.misc +++ /dev/null @@ -1 +0,0 @@ -Correctly manage logcontexts during startup to fix some "Unexpected logging context" warnings \ No newline at end of file diff --git a/changelog.d/4077.misc b/changelog.d/4077.misc deleted file mode 100644 index 52ca4c1de2..0000000000 --- a/changelog.d/4077.misc +++ /dev/null @@ -1 +0,0 @@ -Give some more things logcontexts diff --git a/changelog.d/4081.bugfix b/changelog.d/4081.bugfix deleted file mode 100644 index cfe4b3e9d9..0000000000 --- a/changelog.d/4081.bugfix +++ /dev/null @@ -1,2 +0,0 @@ -Fix race condition where config defined reserved users were not being added to -the monthly active user list prior to the homeserver reactor firing up diff --git a/changelog.d/4082.misc b/changelog.d/4082.misc deleted file mode 100644 index a81faf5e9b..0000000000 --- a/changelog.d/4082.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up some bits of code which were flagged by the linter diff --git a/changelog.d/4083.bugfix b/changelog.d/4083.bugfix deleted file mode 100644 index b3b08cdfa6..0000000000 --- a/changelog.d/4083.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug which prevented backslashes being used in event field filters \ No newline at end of file diff --git a/changelog.d/4085.feature b/changelog.d/4085.feature deleted file mode 100644 index 4bd3ddcf2c..0000000000 --- a/changelog.d/4085.feature +++ /dev/null @@ -1 +0,0 @@ -The register_new_matrix_user script is now ported to Python 3. diff --git a/changelog.d/4089.feature b/changelog.d/4089.feature deleted file mode 100644 index 62c9d839bb..0000000000 --- a/changelog.d/4089.feature +++ /dev/null @@ -1 +0,0 @@ - Configure Docker image to listen on both ipv4 and ipv6. 
-- 
cgit 1.4.1


From c4b3698a80468957c63b2a79685ac06f76cabae1 Mon Sep 17 00:00:00 2001
From: Amber Brown
Date: Mon, 29 Oct 2018 22:59:44 +1100
Subject: Make the replication logger quieter (#4108)

---
 changelog.d/4108.misc             | 1 +
 synapse/replication/tcp/client.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/4108.misc

diff --git a/changelog.d/4108.misc b/changelog.d/4108.misc
new file mode 100644
index 0000000000..85810c3d83
--- /dev/null
+++ b/changelog.d/4108.misc
@@ -0,0 +1 @@
+The "Received rdata" log messages on workers are now logged at DEBUG, not INFO.
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index cbe9645817..586dddb40b 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -106,7 +106,7 @@ class ReplicationClientHandler(object):
 
         Can be overriden in subclasses to handle more.
         """
-        logger.info("Received rdata %s -> %s", stream_name, token)
+        logger.debug("Received rdata %s -> %s", stream_name, token)
         return self.store.process_replication_rows(stream_name, token, rows)
 
     def on_position(self, stream_name, token):
-- 
cgit 1.4.1


From 4cd1c9f2ffa46bc8ed258da200ae3b8ba25fcbb5 Mon Sep 17 00:00:00 2001
From: Amber Brown
Date: Mon, 29 Oct 2018 23:57:24 +1100
Subject: Delete the disused & unspecced identicon functionality (#4106)

---
 changelog.d/4106.removal                    |  1 +
 scripts-dev/make_identicons.pl              | 39 -----------
 synapse/handlers/register.py                |  3 ---
 synapse/python_dependencies.py              |  1 -
 synapse/rest/media/v1/identicon_resource.py | 68 -----------------------
 synapse/rest/media/v1/media_repository.py   |  2 -
 6 files changed, 1 insertion(+), 113 deletions(-)
 create mode 100644 changelog.d/4106.removal
 delete mode 100755 scripts-dev/make_identicons.pl
 delete mode 100644 synapse/rest/media/v1/identicon_resource.py

diff --git a/changelog.d/4106.removal b/changelog.d/4106.removal
new file mode 100644
index 0000000000..7e63208daa
--- /dev/null
+++ b/changelog.d/4106.removal
@@ -0,0 +1 @@
+The disused and un-specced identicon generator has been removed. 
diff --git a/scripts-dev/make_identicons.pl b/scripts-dev/make_identicons.pl deleted file mode 100755 index cbff63e298..0000000000 --- a/scripts-dev/make_identicons.pl +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env perl - -use strict; -use warnings; - -use DBI; -use DBD::SQLite; -use JSON; -use Getopt::Long; - -my $db; # = "homeserver.db"; -my $server = "http://localhost:8008"; -my $size = 320; - -GetOptions("db|d=s", \$db, - "server|s=s", \$server, - "width|w=i", \$size) or usage(); - -usage() unless $db; - -my $dbh = DBI->connect("dbi:SQLite:dbname=$db","","") || die $DBI::errstr; - -my $res = $dbh->selectall_arrayref("select token, name from access_tokens, users where access_tokens.user_id = users.id group by user_id") || die $DBI::errstr; - -foreach (@$res) { - my ($token, $mxid) = ($_->[0], $_->[1]); - my ($user_id) = ($mxid =~ m/@(.*):/); - my ($url) = $dbh->selectrow_array("select avatar_url from profiles where user_id=?", undef, $user_id); - if (!$url || $url =~ /#auto$/) { - `curl -s -o tmp.png "$server/_matrix/media/v1/identicon?name=${mxid}&width=$size&height=$size"`; - my $json = `curl -s -X POST -H "Content-Type: image/png" -T "tmp.png" $server/_matrix/media/v1/upload?access_token=$token`; - my $content_uri = from_json($json)->{content_uri}; - `curl -X PUT -H "Content-Type: application/json" --data '{ "avatar_url": "${content_uri}#auto"}' $server/_matrix/client/api/v1/profile/${mxid}/avatar_url?access_token=$token`; - } -} - -sub usage { - die "usage: ./make-identicons.pl\n\t-d database [e.g. homeserver.db]\n\t-s homeserver (default: http://localhost:8008)\n\t-w identicon size in pixels (default 320)"; -} \ No newline at end of file diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 7b4549223f..d2beb275cf 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -256,9 +256,6 @@ class RegistrationHandler(BaseHandler): except Exception as e: logger.error("Failed to join new user to %r: %r", r, e) - # We used to generate default identicons here, but nowadays - # we want clients to generate their own as part of their branding - # rather than there being consistent matrix-wide ones, so we don't. defer.returnValue((user_id, token)) @defer.inlineCallbacks diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 943876456b..ca62ee7637 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -51,7 +51,6 @@ REQUIREMENTS = { "daemonize>=2.3.1": ["daemonize"], "bcrypt>=3.1.0": ["bcrypt>=3.1.0"], "pillow>=3.1.2": ["PIL"], - "pydenticon>=0.2": ["pydenticon"], "sortedcontainers>=1.4.4": ["sortedcontainers"], "psutil>=2.0.0": ["psutil>=2.0.0"], "pysaml2>=3.0.0": ["saml2"], diff --git a/synapse/rest/media/v1/identicon_resource.py b/synapse/rest/media/v1/identicon_resource.py deleted file mode 100644 index bdbd8d50dd..0000000000 --- a/synapse/rest/media/v1/identicon_resource.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2015, 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from pydenticon import Generator - -from twisted.web.resource import Resource - -from synapse.http.servlet import parse_integer - -FOREGROUND = [ - "rgb(45,79,255)", - "rgb(254,180,44)", - "rgb(226,121,234)", - "rgb(30,179,253)", - "rgb(232,77,65)", - "rgb(49,203,115)", - "rgb(141,69,170)" -] - -BACKGROUND = "rgb(224,224,224)" -SIZE = 5 - - -class IdenticonResource(Resource): - isLeaf = True - - def __init__(self): - Resource.__init__(self) - self.generator = Generator( - SIZE, SIZE, foreground=FOREGROUND, background=BACKGROUND, - ) - - def generate_identicon(self, name, width, height): - v_padding = width % SIZE - h_padding = height % SIZE - top_padding = v_padding // 2 - left_padding = h_padding // 2 - bottom_padding = v_padding - top_padding - right_padding = h_padding - left_padding - width -= v_padding - height -= h_padding - padding = (top_padding, bottom_padding, left_padding, right_padding) - identicon = self.generator.generate( - name, width, height, padding=padding - ) - return identicon - - def render_GET(self, request): - name = "/".join(request.postpath) - width = parse_integer(request, "width", default=96) - height = parse_integer(request, "height", default=96) - identicon_bytes = self.generate_identicon(name, width, height) - request.setHeader(b"Content-Type", b"image/png") - request.setHeader( - b"Cache-Control", b"public,max-age=86400,s-maxage=86400" - ) - return identicon_bytes diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 08b1867fab..d6c5f07af0 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -45,7 +45,6 @@ from ._base import FileInfo, respond_404, respond_with_responder from .config_resource import MediaConfigResource from .download_resource import DownloadResource from .filepath import MediaFilePaths -from .identicon_resource import IdenticonResource from .media_storage import MediaStorage from .preview_url_resource import PreviewUrlResource from .storage_provider import StorageProviderWrapper @@ -769,7 +768,6 @@ class MediaRepositoryResource(Resource): self.putChild(b"thumbnail", ThumbnailResource( hs, media_repo, media_repo.media_storage, )) - self.putChild(b"identicon", IdenticonResource()) if hs.config.url_preview_enabled: self.putChild(b"preview_url", PreviewUrlResource( hs, media_repo, media_repo.media_storage, -- cgit 1.4.1 From b2399f6281d7cd11e7762b683bdd5a4f0c24927e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Oct 2018 14:01:11 +0000 Subject: Make SQL a bit cleaner --- synapse/storage/state.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 59a50a5df9..45afd42b3f 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1272,14 +1272,13 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): # Check if state groups are referenced sql = """ - SELECT state_group, count(*) FROM event_to_state_groups + SELECT DISTINCT state_group FROM event_to_state_groups LEFT JOIN events_to_purge AS ep USING (event_id) WHERE state_group IN (%s) AND ep.event_id IS NULL - GROUP BY state_group """ % (",".join("?" 
for _ in current_search),) txn.execute(sql, list(current_search)) - referenced = set(sg for sg, cnt in txn if cnt > 0) + referenced = set(sg for sg, in txn) referenced_groups |= referenced # We don't continue iterating up the state group graphs for state -- cgit 1.4.1 From f4f223aa4455bea3aa642c23ae957932b1168ba3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Oct 2018 14:01:49 +0000 Subject: Don't make temporary list --- synapse/storage/state.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 45afd42b3f..947d3fc177 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1266,9 +1266,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): current_search = next_to_search next_to_search = set() else: - lst = list(next_to_search) - current_search = set(lst[:100]) - next_to_search = set(lst[100:]) + current_search = set(islice(next_to_search, 100)) + next_to_search -= current_search # Check if state groups are referenced sql = """ -- cgit 1.4.1 From 664b192a3b4aa57597d9832361b025bc078ea87a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Oct 2018 14:21:43 +0000 Subject: Fix set operations thinko --- synapse/storage/state.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 947d3fc177..dfec57c045 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1293,10 +1293,11 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): retcols=("prev_state_group", "state_group",), ) - next_to_search.update(row["state_group"] for row in rows) + prevs = set(row["state_group"] for row in rows) # We don't bother re-handling groups we've already seen - next_to_search -= state_groups_seen - state_groups_seen |= next_to_search + prevs -= state_groups_seen + next_to_search |= prevs + state_groups_seen |= prevs for row in rows: # Note: Each state group can have at most one prev group -- cgit 1.4.1 From ad88460e0d2e0a7c2cf39ec5539d5c4ff030bbbd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Oct 2018 14:23:34 +0000 Subject: Move _find_unreferenced_groups --- synapse/storage/events.py | 85 +++++++++++++++++++++++++++++++++++++++++++++-- synapse/storage/state.py | 79 ------------------------------------------- 2 files changed, 83 insertions(+), 81 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index e791922733..919e855f3b 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2052,8 +2052,10 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore logger.info("[purge] finding state groups that can be deleted") - state_groups_to_delete, remaining_state_groups = self._find_unreferenced_groups( - txn, referenced_state_groups, + state_groups_to_delete, remaining_state_groups = ( + self._find_unreferenced_groups_during_purge( + txn, referenced_state_groups, + ) ) logger.info( @@ -2209,6 +2211,85 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore logger.info("[purge] done") + def _find_unreferenced_groups_during_purge(self, txn, state_groups): + """Used when purging history to figure out which state groups can be + deleted and which need to be de-delta'ed (due to one of its prev groups + being scheduled for deletion). + + Args: + txn + state_groups (set[int]): Set of state groups referenced by events + that are going to be deleted. 
+ + Returns: + tuple[set[int], set[int]]: The set of state groups that can be + deleted and the set of state groups that need to be de-delta'ed + """ + # Graph of state group -> previous group + graph = {} + + # Set of events that we have found to be referenced by events + referenced_groups = set() + + # Set of state groups we've already seen + state_groups_seen = set(state_groups) + + # Set of state groups to handle next. + next_to_search = set(state_groups) + while next_to_search: + # We bound size of groups we're looking up at once, to stop the + # SQL query getting too big + if len(next_to_search) < 100: + current_search = next_to_search + next_to_search = set() + else: + current_search = set(itertools.islice(next_to_search, 100)) + next_to_search -= current_search + + # Check if state groups are referenced + sql = """ + SELECT DISTINCT state_group FROM event_to_state_groups + LEFT JOIN events_to_purge AS ep USING (event_id) + WHERE state_group IN (%s) AND ep.event_id IS NULL + """ % (",".join("?" for _ in current_search),) + txn.execute(sql, list(current_search)) + + referenced = set(sg for sg, in txn) + referenced_groups |= referenced + + # We don't continue iterating up the state group graphs for state + # groups that are referenced. + current_search -= referenced + + rows = self._simple_select_many_txn( + txn, + table="state_group_edges", + column="prev_state_group", + iterable=current_search, + keyvalues={}, + retcols=("prev_state_group", "state_group",), + ) + + prevs = set(row["state_group"] for row in rows) + # We don't bother re-handling groups we've already seen + prevs -= state_groups_seen + next_to_search |= prevs + state_groups_seen |= prevs + + for row in rows: + # Note: Each state group can have at most one prev group + graph[row["state_group"]] = row["prev_state_group"] + + to_delete = state_groups_seen - referenced_groups + + to_dedelta = set() + for sg in referenced_groups: + prev_sg = graph.get(sg) + if prev_sg and prev_sg in to_delete: + to_dedelta.add(sg) + + return to_delete, to_dedelta + @defer.inlineCallbacks def is_event_after(self, event_id1, event_id2): """Returns True if event_id1 is after event_id2 in the stream diff --git a/synapse/storage/state.py b/synapse/storage/state.py index dfec57c045..d737bd6778 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1234,85 +1234,6 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): return count - def _find_unreferenced_groups(self, txn, state_groups): - """Used when purging history to figure out which state groups can be - deleted and which need to be de-delta'ed (due to one of its prev groups - being scheduled for deletion). - - Args: - txn - state_groups (set[int]): Set of state groups referenced by events - that are going to be deleted. - - Returns: - tuple[set[int], set[int]]: The set of state groups that can be - deleted and the set of state groups that need to be de-delta'ed - """ - # Graph of state group -> previous group - graph = {} - - # Set of events that we have found to be referenced by events - referenced_groups = set() - - # Set of state groups we've already seen - state_groups_seen = set(state_groups) - - # Set of state groups to handle next. 
-        next_to_search = set(state_groups)
-        while next_to_search:
-            # We bound size of groups we're looking up at once, to stop the
-            # SQL query getting too big
-            if len(next_to_search) < 100:
-                current_search = next_to_search
-                next_to_search = set()
-            else:
-                current_search = set(islice(next_to_search, 100))
-                next_to_search -= current_search
-
-            # Check if state groups are referenced
-            sql = """
-                SELECT DISTINCT state_group FROM event_to_state_groups
-                LEFT JOIN events_to_purge AS ep USING (event_id)
-                WHERE state_group IN (%s) AND ep.event_id IS NULL
-            """ % (",".join("?" for _ in current_search),)
-            txn.execute(sql, list(current_search))
-
-            referenced = set(sg for sg, in txn)
-            referenced_groups |= referenced
-
-            # We don't continue iterating up the state group graphs for state
-            # groups that are referenced.
-            current_search -= referenced
-
-            rows = self._simple_select_many_txn(
-                txn,
-                table="state_group_edges",
-                column="prev_state_group",
-                iterable=current_search,
-                keyvalues={},
-                retcols=("prev_state_group", "state_group",),
-            )
-
-            prevs = set(row["state_group"] for row in rows)
-            # We don't bother re-handling groups we've already seen
-            prevs -= state_groups_seen
-            next_to_search |= prevs
-            state_groups_seen |= prevs
-
-            for row in rows:
-                # Note: Each state group can have at most one prev group
-                graph[row["state_group"]] = row["prev_state_group"]
-
-        to_delete = state_groups_seen - referenced_groups
-
-        to_dedelta = set()
-        for sg in referenced_groups:
-            prev_sg = graph.get(sg)
-            if prev_sg and prev_sg in to_delete:
-                to_dedelta.add(sg)
-
-        return to_delete, to_dedelta
-
 
 class StateStore(StateGroupWorkerStore, BackgroundUpdateStore):
     """ Keeps track of the state at a given event.
-- 
cgit 1.4.1


From a163b748a5ca37853f440c5c46d2da80f738a9e0 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 29 Oct 2018 17:34:21 +0000
Subject: Don't truncate command name in metrics

---
 synapse/replication/tcp/protocol.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index 5dc7b3fffc..0b3fe6cbf5 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -656,7 +656,7 @@ tcp_inbound_commands = LaterGauge(
     "",
     ["command", "name"],
     lambda: {
-        (k[0], p.name,): count
+        (k, p.name,): count
         for p in connected_connections
         for k, count in iteritems(p.inbound_commands_counter)
     },
@@ -667,7 +667,7 @@ tcp_outbound_commands = LaterGauge(
     "",
     ["command", "name"],
     lambda: {
-        (k[0], p.name,): count
+        (k, p.name,): count
         for p in connected_connections
         for k, count in iteritems(p.outbound_commands_counter)
     },
-- 
cgit 1.4.1


From 88e5ffe6fe816e54a5471728e93fde63353d9a70 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 29 Oct 2018 17:34:34 +0000
Subject: Deduplicate device updates sent over replication

We currently send several kHz of device list updates over replication
occasionally, which often causes the replication streams to lag and
then get dropped.

A lot of those updates will actually be duplicates, since we don't
send e.g. device_ids across replication, so let's deduplicate it when
we pull them out of the database.
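To make the intended deduplication concrete before the diff: the GROUP BY added below keeps a single row per (user_id, destination) pair, with the highest stream_id for that pair. The following is a rough Python equivalent, for illustration only; the function name and sample rows are invented and are not part of this patch.

    def dedup_device_list_changes(rows):
        """Keep only the highest stream_id per (user_id, destination) pair."""
        latest = {}
        for stream_id, user_id, destination in rows:
            key = (user_id, destination)
            if key not in latest or stream_id > latest[key]:
                latest[key] = stream_id
        return [(sid, user, dest) for (user, dest), sid in latest.items()]

    rows = [(1, "@a:hs", "remote1"), (2, "@a:hs", "remote1"), (3, "@b:hs", "remote2")]
    print(sorted(dedup_device_list_changes(rows)))
    # -> [(2, '@a:hs', 'remote1'), (3, '@b:hs', 'remote2')]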
--- synapse/storage/devices.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index d10ff9e4b9..62497ab63f 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -589,10 +589,14 @@ class DeviceStore(SQLBaseStore): combined list of changes to devices, and which destinations need to be poked. `destination` may be None if no destinations need to be poked. """ + # We do a group by here as there can be a large number of duplicate + # entries, since we throw away device IDs. sql = """ - SELECT stream_id, user_id, destination FROM device_lists_stream + SELECT MAX(stream_id) AS stream_id, user_id, destination + FROM device_lists_stream LEFT JOIN device_lists_outbound_pokes USING (stream_id, user_id, device_id) WHERE ? < stream_id AND stream_id <= ? + GROUP BY user_id, destination """ return self._execute( "get_all_device_list_changes_for_remotes", None, -- cgit 1.4.1 From 39f419868fa8df56e3d9df9ba8e153884fe3ea55 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Oct 2018 17:38:09 +0000 Subject: Newsfile --- changelog.d/4109.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4109.misc diff --git a/changelog.d/4109.misc b/changelog.d/4109.misc new file mode 100644 index 0000000000..566c683119 --- /dev/null +++ b/changelog.d/4109.misc @@ -0,0 +1 @@ +Reduce replication traffic for device lists -- cgit 1.4.1 From 4f0fa7a1201b0b76763dc146db9af12c4dd29494 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Oct 2018 18:15:42 +0000 Subject: Newsfile --- changelog.d/4110.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4110.misc diff --git a/changelog.d/4110.misc b/changelog.d/4110.misc new file mode 100644 index 0000000000..a50327ae34 --- /dev/null +++ b/changelog.d/4110.misc @@ -0,0 +1 @@ +Fix `synapse_replication_tcp_protocol_*_commands` metric label to be full command name, rather than just the first character -- cgit 1.4.1 From 0dce9e1379ea867c9a00c8e6cf1d42badb52601d Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 30 Oct 2018 23:55:43 +1100 Subject: Write some tests for the email pusher (#4095) --- .travis.yml | 11 ++-- changelog.d/4095.bugfix | 1 + synapse/push/emailpusher.py | 5 +- synapse/push/mailer.py | 10 +-- synapse/server.py | 5 ++ tests/push/__init__.py | 0 tests/push/test_email.py | 148 ++++++++++++++++++++++++++++++++++++++++++++ tests/server.py | 4 +- tests/test_mau.py | 2 +- tests/unittest.py | 9 ++- 10 files changed, 182 insertions(+), 13 deletions(-) create mode 100644 changelog.d/4095.bugfix create mode 100644 tests/push/__init__.py create mode 100644 tests/push/test_email.py diff --git a/.travis.yml b/.travis.yml index fd41841c77..655fab9d8e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -23,6 +23,9 @@ branches: - develop - /^release-v/ +# When running the tox environments that call Twisted Trial, we can pass the -j +# flag to run the tests concurrently. We set this to 2 for CPU bound tests +# (SQLite) and 4 for I/O bound tests (PostgreSQL). 
matrix: fast_finish: true include: @@ -33,10 +36,10 @@ matrix: env: TOX_ENV="pep8,check_isort" - python: 2.7 - env: TOX_ENV=py27 + env: TOX_ENV=py27 TRIAL_FLAGS="-j 2" - python: 2.7 - env: TOX_ENV=py27-old + env: TOX_ENV=py27-old TRIAL_FLAGS="-j 2" - python: 2.7 env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4" @@ -44,10 +47,10 @@ matrix: - postgresql - python: 3.5 - env: TOX_ENV=py35 + env: TOX_ENV=py35 TRIAL_FLAGS="-j 2" - python: 3.6 - env: TOX_ENV=py36 + env: TOX_ENV=py36 TRIAL_FLAGS="-j 2" - python: 3.6 env: TOX_ENV=py36-postgres TRIAL_FLAGS="-j 4" diff --git a/changelog.d/4095.bugfix b/changelog.d/4095.bugfix new file mode 100644 index 0000000000..76ee7148c2 --- /dev/null +++ b/changelog.d/4095.bugfix @@ -0,0 +1 @@ +Fix exceptions when using the email mailer on Python 3. diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index f369124258..50e1007d84 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -85,7 +85,10 @@ class EmailPusher(object): self.timed_call = None def on_new_notifications(self, min_stream_ordering, max_stream_ordering): - self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering) + if self.max_stream_ordering: + self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering) + else: + self.max_stream_ordering = max_stream_ordering self._start_processing() def on_new_receipts(self, min_stream_id, max_stream_id): diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 16fb5e8471..ebcb93bfc7 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -26,7 +26,6 @@ import bleach import jinja2 from twisted.internet import defer -from twisted.mail.smtp import sendmail from synapse.api.constants import EventTypes from synapse.api.errors import StoreError @@ -85,6 +84,7 @@ class Mailer(object): self.notif_template_html = notif_template_html self.notif_template_text = notif_template_text + self.sendmail = self.hs.get_sendmail() self.store = self.hs.get_datastore() self.macaroon_gen = self.hs.get_macaroon_generator() self.state_handler = self.hs.get_state_handler() @@ -191,11 +191,11 @@ class Mailer(object): multipart_msg.attach(html_part) logger.info("Sending email push notification to %s" % email_address) - # logger.debug(html_text) - yield sendmail( + yield self.sendmail( self.hs.config.email_smtp_host, - raw_from, raw_to, multipart_msg.as_string(), + raw_from, raw_to, multipart_msg.as_string().encode('utf8'), + reactor=self.hs.get_reactor(), port=self.hs.config.email_smtp_port, requireAuthentication=self.hs.config.email_smtp_user is not None, username=self.hs.config.email_smtp_user, @@ -333,7 +333,7 @@ class Mailer(object): notif_events, user_id, reason): if len(notifs_by_room) == 1: # Only one room has new stuff - room_id = notifs_by_room.keys()[0] + room_id = list(notifs_by_room.keys())[0] # If the room has some kind of name, use it, but we don't # want the generated-from-names one here otherwise we'll diff --git a/synapse/server.py b/synapse/server.py index cf6b872cbd..9985687b95 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -23,6 +23,7 @@ import abc import logging from twisted.enterprise import adbapi +from twisted.mail.smtp import sendmail from twisted.web.client import BrowserLikePolicyForHTTPS from synapse.api.auth import Auth @@ -174,6 +175,7 @@ class HomeServer(object): 'message_handler', 'pagination_handler', 'room_context_handler', + 'sendmail', ] # This is overridden in derived application classes @@ -269,6 +271,9 @@ class HomeServer(object): def 
build_room_creation_handler(self): return RoomCreationHandler(self) + def build_sendmail(self): + return sendmail + def build_state_handler(self): return StateHandler(self) diff --git a/tests/push/__init__.py b/tests/push/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/push/test_email.py b/tests/push/test_email.py new file mode 100644 index 0000000000..50ee6910d1 --- /dev/null +++ b/tests/push/test_email.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import pkg_resources + +from twisted.internet.defer import Deferred + +from synapse.rest.client.v1 import admin, login, room + +from tests.unittest import HomeserverTestCase + +try: + from synapse.push.mailer import load_jinja2_templates +except Exception: + load_jinja2_templates = None + + +class EmailPusherTests(HomeserverTestCase): + + skip = "No Jinja installed" if not load_jinja2_templates else None + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + user_id = True + hijack_auth = False + + def make_homeserver(self, reactor, clock): + + # List[Tuple[Deferred, args, kwargs]] + self.email_attempts = [] + + def sendmail(*args, **kwargs): + d = Deferred() + self.email_attempts.append((d, args, kwargs)) + return d + + config = self.default_config() + config.email_enable_notifs = True + config.start_pushers = True + + config.email_template_dir = os.path.abspath( + pkg_resources.resource_filename('synapse', 'res/templates') + ) + config.email_notif_template_html = "notif_mail.html" + config.email_notif_template_text = "notif_mail.txt" + config.email_smtp_host = "127.0.0.1" + config.email_smtp_port = 20 + config.require_transport_security = False + config.email_smtp_user = None + config.email_app_name = "Matrix" + config.email_notif_from = "test@example.com" + + hs = self.setup_test_homeserver(config=config, sendmail=sendmail) + + return hs + + def test_sends_email(self): + + # Register the user who gets notified + user_id = self.register_user("user", "pass") + access_token = self.login("user", "pass") + + # Register the user who sends the message + other_user_id = self.register_user("otheruser", "pass") + other_access_token = self.login("otheruser", "pass") + + # Register the pusher + user_tuple = self.get_success( + self.hs.get_datastore().get_user_by_access_token(access_token) + ) + token_id = user_tuple["token_id"] + + self.get_success( + self.hs.get_pusherpool().add_pusher( + user_id=user_id, + access_token=token_id, + kind="email", + app_id="m.email", + app_display_name="Email Notifications", + device_display_name="a@example.com", + pushkey="a@example.com", + lang=None, + data={}, + ) + ) + + # Create a room + room = self.helper.create_room_as(user_id, tok=access_token) + + # Invite the other person + self.helper.invite(room=room, src=user_id, tok=access_token, targ=other_user_id) + + # The other user joins + self.helper.join(room=room, user=other_user_id, 
tok=other_access_token) + + # The other user sends some messages + self.helper.send(room, body="Hi!", tok=other_access_token) + self.helper.send(room, body="There!", tok=other_access_token) + + # Get the stream ordering before it gets sent + pushers = self.get_success( + self.hs.get_datastore().get_pushers_by(dict(user_name=user_id)) + ) + self.assertEqual(len(pushers), 1) + last_stream_ordering = pushers[0]["last_stream_ordering"] + + # Advance time a bit, so the pusher will register something has happened + self.pump(100) + + # It hasn't succeeded yet, so the stream ordering shouldn't have moved + pushers = self.get_success( + self.hs.get_datastore().get_pushers_by(dict(user_name=user_id)) + ) + self.assertEqual(len(pushers), 1) + self.assertEqual(last_stream_ordering, pushers[0]["last_stream_ordering"]) + + # One email was attempted to be sent + self.assertEqual(len(self.email_attempts), 1) + + # Make the email succeed + self.email_attempts[0][0].callback(True) + self.pump() + + # One email was attempted to be sent + self.assertEqual(len(self.email_attempts), 1) + + # The stream ordering has increased + pushers = self.get_success( + self.hs.get_datastore().get_pushers_by(dict(user_name=user_id)) + ) + self.assertEqual(len(pushers), 1) + self.assertTrue(pushers[0]["last_stream_ordering"] > last_stream_ordering) diff --git a/tests/server.py b/tests/server.py index 7bee58dff1..819c854448 100644 --- a/tests/server.py +++ b/tests/server.py @@ -125,7 +125,9 @@ def make_request(method, path, content=b"", access_token=None, request=SynapseRe req.content = BytesIO(content) if access_token: - req.requestHeaders.addRawHeader(b"Authorization", b"Bearer " + access_token) + req.requestHeaders.addRawHeader( + b"Authorization", b"Bearer " + access_token.encode('ascii') + ) if content: req.requestHeaders.addRawHeader(b"Content-Type", b"application/json") diff --git a/tests/test_mau.py b/tests/test_mau.py index bdbacb8448..5d387851c5 100644 --- a/tests/test_mau.py +++ b/tests/test_mau.py @@ -207,7 +207,7 @@ class TestMauLimit(unittest.TestCase): def do_sync_for_user(self, token): request, channel = make_request( - "GET", "/sync", access_token=token.encode('ascii') + "GET", "/sync", access_token=token ) render(request, self.resource, self.reactor) diff --git a/tests/unittest.py b/tests/unittest.py index a59291cc60..4d40bdb6a5 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -146,6 +146,13 @@ def DEBUG(target): return target +def INFO(target): + """A decorator to set the .loglevel attribute to logging.INFO. + Can apply to either a TestCase or an individual test method.""" + target.loglevel = logging.INFO + return target + + class HomeserverTestCase(TestCase): """ A base TestCase that reduces boilerplate for HomeServer-using test cases. 
@@ -373,5 +380,5 @@ class HomeserverTestCase(TestCase): self.render(request) self.assertEqual(channel.code, 200) - access_token = channel.json_body["access_token"].encode('ascii') + access_token = channel.json_body["access_token"] return access_token -- cgit 1.4.1 From 2e223a8c22ef7f65aa42fd149f178a842b60e3c7 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 31 Oct 2018 04:24:59 +1100 Subject: Remove the unused /pull federation API (#4118) --- changelog.d/4118.removal | 1 + synapse/federation/federation_server.py | 5 ----- synapse/federation/transport/server.py | 9 --------- 3 files changed, 1 insertion(+), 14 deletions(-) create mode 100644 changelog.d/4118.removal diff --git a/changelog.d/4118.removal b/changelog.d/4118.removal new file mode 100644 index 0000000000..6fb1d67b47 --- /dev/null +++ b/changelog.d/4118.removal @@ -0,0 +1 @@ +The obsolete and non-functional /pull federation endpoint has been removed. diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 0f9302a6a8..fa2cc550e2 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -323,11 +323,6 @@ class FederationServer(FederationBase): else: defer.returnValue((404, "")) - @defer.inlineCallbacks - @log_function - def on_pull_request(self, origin, versions): - raise NotImplementedError("Pull transactions not implemented") - @defer.inlineCallbacks def on_query_request(self, query_type, args): received_queries_counter.labels(query_type).inc() diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 7288d49074..3553f418f1 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -362,14 +362,6 @@ class FederationSendServlet(BaseFederationServlet): defer.returnValue((code, response)) -class FederationPullServlet(BaseFederationServlet): - PATH = "/pull/" - - # This is for when someone asks us for everything since version X - def on_GET(self, origin, content, query): - return self.handler.on_pull_request(query["origin"][0], query["v"]) - - class FederationEventServlet(BaseFederationServlet): PATH = "/event/(?P[^/]*)/" @@ -1261,7 +1253,6 @@ class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet): FEDERATION_SERVLET_CLASSES = ( FederationSendServlet, - FederationPullServlet, FederationEventServlet, FederationStateServlet, FederationStateIdsServlet, -- cgit 1.4.1 From 3bade14ec0aa7e56c84d30241bd86a177f0699d6 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 31 Oct 2018 04:33:41 +1100 Subject: Fix search 500ing (#4122) --- changelog.d/4122.bugfix | 1 + synapse/handlers/search.py | 8 ++- tests/rest/client/v1/test_rooms.py | 106 ++++++++++++++++++++++++++++++++++++- 3 files changed, 112 insertions(+), 3 deletions(-) create mode 100644 changelog.d/4122.bugfix diff --git a/changelog.d/4122.bugfix b/changelog.d/4122.bugfix new file mode 100644 index 0000000000..66dcfb18b9 --- /dev/null +++ b/changelog.d/4122.bugfix @@ -0,0 +1 @@ +Searches that request profile info now no longer fail with a 500. 
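The handler change below adapts the search code to the storage layer's newer StateFilter interface: get_state_for_event no longer accepts a raw types= list of (event type, state key) pairs, which is what was producing the 500. A minimal sketch of the new calling convention (the sender IDs are hypothetical, and this assumes the StateFilter API as of this revision):

    from synapse.api.constants import EventTypes
    from synapse.storage.state import StateFilter

    senders = ["@alice:example.org", "@bob:example.org"]  # hypothetical senders

    # Scope the state lookup to just these users' m.room.member events,
    # instead of passing types=[...] as the old signature allowed.
    state_filter = StateFilter.from_types(
        [(EventTypes.Member, sender) for sender in senders]
    )
    # state = yield store.get_state_for_event(last_event_id, state_filter)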
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 0c1d52fd11..80e7b15de8 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -24,6 +24,7 @@ from synapse.api.constants import EventTypes, Membership from synapse.api.errors import SynapseError from synapse.api.filtering import Filter from synapse.events.utils import serialize_event +from synapse.storage.state import StateFilter from synapse.visibility import filter_events_for_client from ._base import BaseHandler @@ -324,9 +325,12 @@ class SearchHandler(BaseHandler): else: last_event_id = event.event_id + state_filter = StateFilter.from_types( + [(EventTypes.Member, sender) for sender in senders] + ) + state = yield self.store.get_state_for_event( - last_event_id, - types=[(EventTypes.Member, sender) for sender in senders] + last_event_id, state_filter ) res["profile_info"] = { diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 359f7777ff..a824be9a62 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -23,7 +23,7 @@ from six.moves.urllib import parse as urlparse from twisted.internet import defer from synapse.api.constants import Membership -from synapse.rest.client.v1 import room +from synapse.rest.client.v1 import admin, login, room from tests import unittest @@ -799,3 +799,107 @@ class RoomMessageListTestCase(RoomBase): self.assertEquals(token, channel.json_body['start']) self.assertTrue("chunk" in channel.json_body) self.assertTrue("end" in channel.json_body) + + +class RoomSearchTestCase(unittest.HomeserverTestCase): + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + user_id = True + hijack_auth = False + + def prepare(self, reactor, clock, hs): + + # Register the user who does the searching + self.user_id = self.register_user("user", "pass") + self.access_token = self.login("user", "pass") + + # Register the user who sends the message + self.other_user_id = self.register_user("otheruser", "pass") + self.other_access_token = self.login("otheruser", "pass") + + # Create a room + self.room = self.helper.create_room_as(self.user_id, tok=self.access_token) + + # Invite the other person + self.helper.invite( + room=self.room, + src=self.user_id, + tok=self.access_token, + targ=self.other_user_id, + ) + + # The other user joins + self.helper.join( + room=self.room, user=self.other_user_id, tok=self.other_access_token + ) + + def test_finds_message(self): + """ + The search functionality will search for content in messages if asked to + do so. + """ + # The other user sends some messages + self.helper.send(self.room, body="Hi!", tok=self.other_access_token) + self.helper.send(self.room, body="There!", tok=self.other_access_token) + + request, channel = self.make_request( + "POST", + "/search?access_token=%s" % (self.access_token,), + { + "search_categories": { + "room_events": {"keys": ["content.body"], "search_term": "Hi"} + } + }, + ) + self.render(request) + + # Check we get the results we expect -- one search result, of the sent + # messages + self.assertEqual(channel.code, 200) + results = channel.json_body["search_categories"]["room_events"] + self.assertEqual(results["count"], 1) + self.assertEqual(results["results"][0]["result"]["content"]["body"], "Hi!") + + # No context was requested, so we should get none. 
+        self.assertEqual(results["results"][0]["context"], {})
+
+    def test_include_context(self):
+        """
+        When event_context includes include_profile, profile information will be
+        included in the search response.
+        """
+        # The other user sends some messages
+        self.helper.send(self.room, body="Hi!", tok=self.other_access_token)
+        self.helper.send(self.room, body="There!", tok=self.other_access_token)
+
+        request, channel = self.make_request(
+            "POST",
+            "/search?access_token=%s" % (self.access_token,),
+            {
+                "search_categories": {
+                    "room_events": {
+                        "keys": ["content.body"],
+                        "search_term": "Hi",
+                        "event_context": {"include_profile": True},
+                    }
+                }
+            },
+        )
+        self.render(request)
+
+        # Check we get the results we expect -- one search result, of the sent
+        # messages
+        self.assertEqual(channel.code, 200)
+        results = channel.json_body["search_categories"]["room_events"]
+        self.assertEqual(results["count"], 1)
+        self.assertEqual(results["results"][0]["result"]["content"]["body"], "Hi!")
+
+        # We should get context info, like the two users, and the display names.
+        context = results["results"][0]["context"]
+        self.assertEqual(len(context["profile_info"].keys()), 2)
+        self.assertEqual(
+            context["profile_info"][self.other_user_id]["displayname"], "otheruser"
+        )
-- cgit 1.4.1

From 67c1924899721e57093470d2f449f745a983b1d9 Mon Sep 17 00:00:00 2001
From: Amber Brown
Date: Wed, 31 Oct 2018 06:27:05 +1100
Subject: version bump

---
 synapse/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/__init__.py b/synapse/__init__.py
index c8377df7b2..97a57a7ac1 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -27,4 +27,4 @@ try:
 except ImportError:
     pass
 
-__version__ = "0.33.8rc1"
+__version__ = "0.33.8rc2"
-- cgit 1.4.1

From e615e95590caa550de3199234c10761398521e10 Mon Sep 17 00:00:00 2001
From: Amber Brown
Date: Wed, 31 Oct 2018 06:28:11 +1100
Subject: changelog

---
 CHANGES.md              | 10 ++++++++++
 changelog.d/4122.bugfix |  1 -
 2 files changed, 10 insertions(+), 1 deletion(-)
 delete mode 100644 changelog.d/4122.bugfix

diff --git a/CHANGES.md b/CHANGES.md
index dc2a745c8b..560b406297 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,13 @@
+Synapse 0.33.8rc2 (2018-10-31)
+==============================
+
+Bugfixes
+--------
+
+- Searches that request profile info now no longer fail with a 500. Fixes
+  a regression in 0.33.8rc1. ([\#4122](https://github.com/matrix-org/synapse/issues/4122))
+
+
 Synapse 0.33.8rc1 (2018-10-29)
 ==============================
 
diff --git a/changelog.d/4122.bugfix b/changelog.d/4122.bugfix
deleted file mode 100644
index 66dcfb18b9..0000000000
--- a/changelog.d/4122.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Searches that request profile info now no longer fail with a 500.
-- cgit 1.4.1

From 0f6ec6d1aedc88a2057f50b77ce9d6a405177096 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Tue, 30 Oct 2018 21:00:31 +0000
Subject: Attempt to fix tox installs

It seems that, at some point, the ability to run tox on old servers (with old
setuptools) got broken - and it was only working on our Jenkins instance by
dint of reusing the tox environments.

Let's try to get tox to do the right thing, and remove the guff from
jenkins/prepare_synapse.sh.

(There is a separate question about whether the jenkins builds should be using
tox to prepare the virtualenv at all here, but that is somewhat orthogonal).
---
 jenkins/prepare_synapse.sh | 19 -------------------
 tox.ini                    | 14 ++++++++++++++
 2 files changed, 14 insertions(+), 19 deletions(-)

diff --git a/jenkins/prepare_synapse.sh b/jenkins/prepare_synapse.sh
index d95ca846c4..016afb8baa 100755
--- a/jenkins/prepare_synapse.sh
+++ b/jenkins/prepare_synapse.sh
@@ -14,22 +14,3 @@ fi
 
 # set up the virtualenv
 tox -e py27 --notest -v
-
-TOX_BIN=$TOX_DIR/py27/bin
-
-# cryptography 2.2 requires setuptools >= 18.5.
-#
-# older versions of virtualenv (?) give us a virtualenv with the same version
-# of setuptools as is installed on the system python (and tox runs virtualenv
-# under python3, so we get the version of setuptools that is installed on that).
-#
-# anyway, make sure that we have a recent enough setuptools.
-$TOX_BIN/pip install 'setuptools>=18.5'
-
-# we also need a semi-recent version of pip, because old ones fail to install
-# the "enum34" dependency of cryptography.
-$TOX_BIN/pip install 'pip>=10'
-
-{ python synapse/python_dependencies.py
-  echo lxml
-} | xargs $TOX_BIN/pip install

diff --git a/tox.ini b/tox.ini
index 9de5a5704a..920211bf50 100644
--- a/tox.ini
+++ b/tox.ini
@@ -11,6 +11,20 @@ deps =
     # needed by some of the tests
     lxml
 
+    # cryptography 2.2 requires setuptools >= 18.5
+    #
+    # older versions of virtualenv (?) give us a virtualenv with the same
+    # version of setuptools as is installed on the system python (and tox runs
+    # virtualenv under python3, so we get the version of setuptools that is
+    # installed on that).
+    #
+    # anyway, make sure that we have a recent enough setuptools.
+    setuptools>=18.5
+
+    # we also need a semi-recent version of pip, because old ones fail to
+    # install the "enum34" dependency of cryptography.
+    pip>=10
+
 setenv =
     PYTHONDONTWRITEBYTECODE = no_byte_code
-- cgit 1.4.1

From a2d8bff0dc430f9e0a980535dd4330ff420118ee Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Tue, 30 Oct 2018 21:21:05 +0000
Subject: changelog

---
 changelog.d/4124.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/4124.misc

diff --git a/changelog.d/4124.misc b/changelog.d/4124.misc
new file mode 100644
index 0000000000..28f438b9b2
--- /dev/null
+++ b/changelog.d/4124.misc
@@ -0,0 +1 @@
+Fix `tox` failure on old systems
-- cgit 1.4.1

From f79f45448527f22f3813e38233521a5e13e9223e Mon Sep 17 00:00:00 2001
From: Amber Brown
Date: Wed, 31 Oct 2018 22:29:02 +1100
Subject: Remove deprecated v1 key exchange endpoint (#4119)

---
 changelog.d/4119.removal                   |  1 +
 synapse/api/urls.py                        |  1 -
 synapse/app/homeserver.py                  |  7 +--
 synapse/rest/key/v1/__init__.py            | 14 -----
 synapse/rest/key/v1/server_key_resource.py | 92 ------------------------------
 5 files changed, 2 insertions(+), 113 deletions(-)
 create mode 100644 changelog.d/4119.removal
 delete mode 100644 synapse/rest/key/v1/__init__.py
 delete mode 100644 synapse/rest/key/v1/server_key_resource.py

diff --git a/changelog.d/4119.removal b/changelog.d/4119.removal
new file mode 100644
index 0000000000..81383ece6b
--- /dev/null
+++ b/changelog.d/4119.removal
@@ -0,0 +1 @@
+The deprecated v1 key exchange endpoints have been removed.
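After the removal below, requests to /_matrix/key/v1 will simply 404; the v2 resource at /_matrix/key/v2 is unaffected. A hedged sketch of fetching a server's signing keys from the surviving endpoint (the homeserver name here is a placeholder):

    import json
    from six.moves.urllib.request import urlopen

    # Query the v2 server-key endpoint that replaces the removed v1 resource.
    resp = urlopen("https://matrix.example.org/_matrix/key/v2/server")
    keys = json.load(resp)

    print(keys["server_name"])
    print(sorted(keys["verify_keys"]))  # key ids, e.g. "ed25519:auto"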
diff --git a/synapse/api/urls.py b/synapse/api/urls.py index 6d9f1ca0ef..f78695b657 100644 --- a/synapse/api/urls.py +++ b/synapse/api/urls.py @@ -28,7 +28,6 @@ FEDERATION_PREFIX = "/_matrix/federation/v1" STATIC_PREFIX = "/_matrix/static" WEB_CLIENT_PREFIX = "/_matrix/client" CONTENT_REPO_PREFIX = "/_matrix/content" -SERVER_KEY_PREFIX = "/_matrix/key/v1" SERVER_KEY_V2_PREFIX = "/_matrix/key/v2" MEDIA_PREFIX = "/_matrix/media/r0" LEGACY_MEDIA_PREFIX = "/_matrix/media/v1" diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 593e1e75db..415374a2ce 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -37,7 +37,6 @@ from synapse.api.urls import ( FEDERATION_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, - SERVER_KEY_PREFIX, SERVER_KEY_V2_PREFIX, STATIC_PREFIX, WEB_CLIENT_PREFIX, @@ -59,7 +58,6 @@ from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, check_requirem from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory from synapse.rest import ClientRestResource -from synapse.rest.key.v1.server_key_resource import LocalKey from synapse.rest.key.v2 import KeyApiV2Resource from synapse.rest.media.v0.content_repository import ContentRepoResource from synapse.server import HomeServer @@ -236,10 +234,7 @@ class SynapseHomeServer(HomeServer): ) if name in ["keys", "federation"]: - resources.update({ - SERVER_KEY_PREFIX: LocalKey(self), - SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self), - }) + resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self) if name == "webclient": resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self) diff --git a/synapse/rest/key/v1/__init__.py b/synapse/rest/key/v1/__init__.py deleted file mode 100644 index fe0ac3f8e9..0000000000 --- a/synapse/rest/key/v1/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2015, 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/synapse/rest/key/v1/server_key_resource.py b/synapse/rest/key/v1/server_key_resource.py deleted file mode 100644 index 38eb2ee23f..0000000000 --- a/synapse/rest/key/v1/server_key_resource.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import logging - -from canonicaljson import encode_canonical_json -from signedjson.sign import sign_json -from unpaddedbase64 import encode_base64 - -from OpenSSL import crypto -from twisted.web.resource import Resource - -from synapse.http.server import respond_with_json_bytes - -logger = logging.getLogger(__name__) - - -class LocalKey(Resource): - """HTTP resource containing encoding the TLS X.509 certificate and NACL - signature verification keys for this server:: - - GET /key HTTP/1.1 - - HTTP/1.1 200 OK - Content-Type: application/json - { - "server_name": "this.server.example.com" - "verify_keys": { - "algorithm:version": # base64 encoded NACL verification key. - }, - "tls_certificate": # base64 ASN.1 DER encoded X.509 tls cert. - "signatures": { - "this.server.example.com": { - "algorithm:version": # NACL signature for this server. - } - } - } - """ - - def __init__(self, hs): - self.response_body = encode_canonical_json( - self.response_json_object(hs.config) - ) - Resource.__init__(self) - - @staticmethod - def response_json_object(server_config): - verify_keys = {} - for key in server_config.signing_key: - verify_key_bytes = key.verify_key.encode() - key_id = "%s:%s" % (key.alg, key.version) - verify_keys[key_id] = encode_base64(verify_key_bytes) - - x509_certificate_bytes = crypto.dump_certificate( - crypto.FILETYPE_ASN1, - server_config.tls_certificate - ) - json_object = { - u"server_name": server_config.server_name, - u"verify_keys": verify_keys, - u"tls_certificate": encode_base64(x509_certificate_bytes) - } - for key in server_config.signing_key: - json_object = sign_json( - json_object, - server_config.server_name, - key, - ) - - return json_object - - def render_GET(self, request): - return respond_with_json_bytes( - request, 200, self.response_body, - ) - - def getChild(self, name, request): - if name == b'': - return self -- cgit 1.4.1 From 916efc824950f924c3f7bced09b9cd5759b1532e Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 31 Oct 2018 23:14:39 +1100 Subject: Remove fetching keys via the deprecated v1 kex method (#4120) --- changelog.d/4120.removal | 1 + synapse/crypto/keyclient.py | 8 ++-- synapse/crypto/keyring.py | 110 +++----------------------------------------- 3 files changed, 13 insertions(+), 106 deletions(-) create mode 100644 changelog.d/4120.removal diff --git a/changelog.d/4120.removal b/changelog.d/4120.removal new file mode 100644 index 0000000000..a7a567098f --- /dev/null +++ b/changelog.d/4120.removal @@ -0,0 +1 @@ +Synapse will no longer fetch keys using the fallback deprecated v1 key exchange method and will now always use v2. 
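In the client change below, the requested key ID is folded into the v2 request path and percent-encoded, since key IDs contain a colon. The same construction in isolation (the key ID is illustrative):

    from six.moves import urllib

    KEY_API_V2 = "/_matrix/key/v2/server/%s"
    path = KEY_API_V2 % (urllib.parse.quote("ed25519:a_key_id"),)
    # -> "/_matrix/key/v2/server/ed25519%3Aa_key_id"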
diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py index 080c81f14b..d40e4b8591 100644 --- a/synapse/crypto/keyclient.py +++ b/synapse/crypto/keyclient.py @@ -15,6 +15,8 @@ import logging +from six.moves import urllib + from canonicaljson import json from twisted.internet import defer, reactor @@ -28,15 +30,15 @@ from synapse.util import logcontext logger = logging.getLogger(__name__) -KEY_API_V1 = b"/_matrix/key/v1/" +KEY_API_V2 = "/_matrix/key/v2/server/%s" @defer.inlineCallbacks -def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1): +def fetch_server_key(server_name, tls_client_options_factory, key_id): """Fetch the keys for a remote server.""" factory = SynapseKeyClientFactory() - factory.path = path + factory.path = KEY_API_V2 % (urllib.parse.quote(key_id), ) factory.host = server_name endpoint = matrix_federation_endpoint( reactor, server_name, tls_client_options_factory, timeout=30 diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index d89f94c219..515ebbc148 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd -# Copyright 2017 New Vector Ltd. +# Copyright 2017, 2018 New Vector Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,8 +18,6 @@ import hashlib import logging from collections import namedtuple -from six.moves import urllib - from signedjson.key import ( decode_verify_key_bytes, encode_verify_key_base64, @@ -395,32 +393,13 @@ class Keyring(object): @defer.inlineCallbacks def get_keys_from_server(self, server_name_and_key_ids): - @defer.inlineCallbacks - def get_key(server_name, key_ids): - keys = None - try: - keys = yield self.get_server_verify_key_v2_direct( - server_name, key_ids - ) - except Exception as e: - logger.info( - "Unable to get key %r for %r directly: %s %s", - key_ids, server_name, - type(e).__name__, str(e), - ) - - if not keys: - keys = yield self.get_server_verify_key_v1_direct( - server_name, key_ids - ) - - keys = {server_name: keys} - - defer.returnValue(keys) - results = yield logcontext.make_deferred_yieldable(defer.gatherResults( [ - run_in_background(get_key, server_name, key_ids) + run_in_background( + self.get_server_verify_key_v2_direct, + server_name, + key_ids, + ) for server_name, key_ids in server_name_and_key_ids ], consumeErrors=True, @@ -525,10 +504,7 @@ class Keyring(object): continue (response, tls_certificate) = yield fetch_server_key( - server_name, self.hs.tls_client_options_factory, - path=("/_matrix/key/v2/server/%s" % ( - urllib.parse.quote(requested_key_id), - )).encode("ascii"), + server_name, self.hs.tls_client_options_factory, requested_key_id ) if (u"signatures" not in response @@ -657,78 +633,6 @@ class Keyring(object): defer.returnValue(results) - @defer.inlineCallbacks - def get_server_verify_key_v1_direct(self, server_name, key_ids): - """Finds a verification key for the server with one of the key ids. - Args: - server_name (str): The name of the server to fetch a key for. - keys_ids (list of str): The key_ids to check for. - """ - - # Try to fetch the key from the remote server. - - (response, tls_certificate) = yield fetch_server_key( - server_name, self.hs.tls_client_options_factory - ) - - # Check the response. 
- - x509_certificate_bytes = crypto.dump_certificate( - crypto.FILETYPE_ASN1, tls_certificate - ) - - if ("signatures" not in response - or server_name not in response["signatures"]): - raise KeyLookupError("Key response not signed by remote server") - - if "tls_certificate" not in response: - raise KeyLookupError("Key response missing TLS certificate") - - tls_certificate_b64 = response["tls_certificate"] - - if encode_base64(x509_certificate_bytes) != tls_certificate_b64: - raise KeyLookupError("TLS certificate doesn't match") - - # Cache the result in the datastore. - - time_now_ms = self.clock.time_msec() - - verify_keys = {} - for key_id, key_base64 in response["verify_keys"].items(): - if is_signing_algorithm_supported(key_id): - key_bytes = decode_base64(key_base64) - verify_key = decode_verify_key_bytes(key_id, key_bytes) - verify_key.time_added = time_now_ms - verify_keys[key_id] = verify_key - - for key_id in response["signatures"][server_name]: - if key_id not in response["verify_keys"]: - raise KeyLookupError( - "Key response must include verification keys for all" - " signatures" - ) - if key_id in verify_keys: - verify_signed_json( - response, - server_name, - verify_keys[key_id] - ) - - yield self.store.store_server_certificate( - server_name, - server_name, - time_now_ms, - tls_certificate, - ) - - yield self.store_keys( - server_name=server_name, - from_server=server_name, - verify_keys=verify_keys, - ) - - defer.returnValue(verify_keys) - def store_keys(self, server_name, from_server, verify_keys): """Store a collection of verify keys for a given server Args: -- cgit 1.4.1 From e3758c8c929be11fe38cebb2b4f7d43185f80197 Mon Sep 17 00:00:00 2001 From: Jonas Schürmann Date: Wed, 31 Oct 2018 15:46:47 +0100 Subject: Fix typo in docker-compose.yml MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Schürmann --- contrib/docker/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml index 3a8dfbae34..b1f6fcb7da 100644 --- a/contrib/docker/docker-compose.yml +++ b/contrib/docker/docker-compose.yml @@ -47,4 +47,4 @@ services: # You may store the database tables in a local folder.. - ./schemas:/var/lib/postgresql/data # .. or store them on some high performance storage for better results - # - /path/to/ssd/storage:/var/lib/postfesql/data + # - /path/to/ssd/storage:/var/lib/postgresql/data -- cgit 1.4.1 From 9b827c40ca71510390c92472f7ec5cfcff9e69b2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 31 Oct 2018 15:42:23 +0000 Subject: Log some bits about event creation (#4121) I found these helpful in debugging my room upgrade tests. 
---
 changelog.d/4121.misc       | 1 +
 synapse/handlers/message.py | 3 +++
 synapse/handlers/room.py    | 4 ++++
 3 files changed, 8 insertions(+)
 create mode 100644 changelog.d/4121.misc

diff --git a/changelog.d/4121.misc b/changelog.d/4121.misc
new file mode 100644
index 0000000000..9c29d80c3f
--- /dev/null
+++ b/changelog.d/4121.misc
@@ -0,0 +1 @@
+Log some bits about room creation

diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 969e588e73..a7cd779b02 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -427,6 +427,9 @@ class EventCreationHandler(object):
 
         if event.is_state():
             prev_state = yield self.deduplicate_state_event(event, context)
             if prev_state is not None:
+                logger.info(
+                    "Not bothering to persist duplicate state event %s", event.event_id,
+                )
                 defer.returnValue(prev_state)
 

diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 1d9417ff1a..fe960342b9 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -104,6 +104,8 @@ class RoomCreationHandler(BaseHandler):
             creator_id=user_id, is_public=r["is_public"],
         )
+        logger.info("Creating new room %s to replace %s", new_room_id, old_room_id)
+
         # we create and auth the tombstone event before properly creating the new
         # room, to check our user has perms in the old room.
         tombstone_event, tombstone_context = (
@@ -522,6 +524,7 @@ class RoomCreationHandler(BaseHandler):
         @defer.inlineCallbacks
         def send(etype, content, **kwargs):
             event = create(etype, content, **kwargs)
+            logger.info("Sending %s in new room", etype)
             yield self.event_creation_handler.create_and_send_nonmember_event(
                 creator,
                 event,
@@ -544,6 +547,7 @@ class RoomCreationHandler(BaseHandler):
             content=creation_content,
         )
 
+        logger.info("Sending %s in new room", EventTypes.Member)
         yield self.room_member_handler.update_membership(
             creator,
             creator.user,
-- cgit 1.4.1

From 94c7fadc98542d582ff67c5ac788081c0d836e6b Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Fri, 26 Oct 2018 15:11:35 +0100
Subject: Attempt to move room aliases on room upgrades

---
 changelog.d/4101.feature      |   1 +
 synapse/handlers/directory.py |  34 +++++++++---
 synapse/handlers/room.py      | 121 +++++++++++++++++++++++++++++++++++++++---
 3 files changed, 142 insertions(+), 14 deletions(-)
 create mode 100644 changelog.d/4101.feature

diff --git a/changelog.d/4101.feature b/changelog.d/4101.feature
new file mode 100644
index 0000000000..a3f7dbdcdd
--- /dev/null
+++ b/changelog.d/4101.feature
@@ -0,0 +1 @@
+Support for replacing rooms with new ones

diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 7d67bf803a..0699731c13 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -138,9 +138,30 @@ class DirectoryHandler(BaseHandler):
         )
 
     @defer.inlineCallbacks
-    def delete_association(self, requester, room_alias):
-        # association deletion for human users
+    def delete_association(self, requester, room_alias, send_event=True):
+        """Remove an alias from the directory
+        (this is only meant for human users; AS users should call
+        delete_appservice_association)
+
+        Args:
+            requester (Requester):
+            room_alias (RoomAlias):
+            send_event (bool): Whether to send an updated m.room.aliases event.
+ Note that, if we delete the canonical alias, we will always attempt + to send an m.room.canonical_alias event + + Returns: + Deferred[unicode]: room id that the alias used to point to + + Raises: + NotFoundError: if the alias doesn't exist + + AuthError: if the user doesn't have perms to delete the alias (ie, the user + is neither the creator of the alias, nor a server admin. + + SynapseError: if the alias belongs to an AS + """ user_id = requester.user.to_string() try: @@ -168,10 +189,11 @@ class DirectoryHandler(BaseHandler): room_id = yield self._delete_association(room_alias) try: - yield self.send_room_alias_update_event( - requester, - room_id - ) + if send_event: + yield self.send_room_alias_update_event( + requester, + room_id + ) yield self._update_canonical_alias( requester, diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 1d9417ff1a..76811050a6 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -136,10 +136,15 @@ class RoomCreationHandler(BaseHandler): requester, tombstone_event, tombstone_context, ) - # and finally, shut down the PLs in the old room, and update them in the new - # room. old_room_state = yield tombstone_context.get_current_state_ids(self.store) + # update any aliases + yield self._move_aliases_to_new_room( + requester, old_room_id, new_room_id, old_room_state, + ) + + # and finally, shut down the PLs in the old room, and update them in the new + # room. yield self._update_upgraded_room_pls( requester, old_room_id, new_room_id, old_room_state, ) @@ -245,11 +250,6 @@ class RoomCreationHandler(BaseHandler): if not self.spam_checker.user_may_create_room(user_id): raise SynapseError(403, "You are not permitted to create rooms") - # XXX check alias is free - # canonical_alias = None - - # XXX create association in directory handler - creation_content = { "room_version": new_room_version, "predecessor": { @@ -295,7 +295,112 @@ class RoomCreationHandler(BaseHandler): # XXX invites/joins # XXX 3pid invites - # XXX directory_handler.send_room_alias_update_event + + @defer.inlineCallbacks + def _move_aliases_to_new_room( + self, requester, old_room_id, new_room_id, old_room_state, + ): + directory_handler = self.hs.get_handlers().directory_handler + + aliases = yield self.store.get_aliases_for_room(old_room_id) + + # check to see if we have a canonical alias. + canonical_alias = None + canonical_alias_event_id = old_room_state.get((EventTypes.CanonicalAlias, "")) + if canonical_alias_event_id: + canonical_alias_event = yield self.store.get_event(canonical_alias_event_id) + if canonical_alias_event: + canonical_alias = canonical_alias_event.content.get("alias", "") + + # first we try to remove the aliases from the old room (we suppress sending + # the room_aliases event until the end). + # + # Note that we'll only be able to remove aliases that (a) aren't owned by an AS, + # and (b) unless the user is a server admin, which the user created. + # + # This is probably correct - given we don't allow such aliases to be deleted + # normally, it would be odd to allow it in the case of doing a room upgrade - + # but it makes the upgrade less effective, and you have to wonder why a room + # admin can't remove aliases that point to that room anyway. 
+ # (cf https://github.com/matrix-org/synapse/issues/2360) + # + removed_aliases = [] + for alias_str in aliases: + alias = RoomAlias.from_string(alias_str) + try: + yield directory_handler.delete_association( + requester, alias, send_event=False, + ) + except SynapseError as e: + logger.warning( + "Unable to remove alias %s from old room: %s", + alias, e, + ) + else: + removed_aliases.append(alias_str) + + # if we didn't find any aliases, or couldn't remove anyway, we can skip the rest + # of this. + if not removed_aliases: + return + + try: + # this can fail if, for some reason, our user doesn't have perms to send + # m.room.aliases events in the old room (note that we've already checked that + # they have perms to send a tombstone event, so that's not terribly likely). + # + # If that happens, it's regrettable, but we should carry on: it's the same + # as when you remove an alias from the directory normally - it just means that + # the aliases event gets out of sync with the directory + # (cf https://github.com/vector-im/riot-web/issues/2369) + yield directory_handler.send_room_alias_update_event( + requester, old_room_id, + ) + except AuthError as e: + logger.warning( + "Failed to send updated alias event on old room: %s", e, + ) + + # we can now add any aliases we successfully removed to the new room. + for alias in removed_aliases: + try: + yield directory_handler.create_association( + requester, RoomAlias.from_string(alias), + new_room_id, servers=(self.hs.hostname, ), + send_event=False, + ) + logger.info("Moved alias %s to new room", alias) + except SynapseError as e: + # I'm not really expecting this to happen, but it could if the spam + # checking module decides it shouldn't, or similar. + logger.error( + "Error adding alias %s to new room: %s", + alias, e, + ) + + try: + if canonical_alias and (canonical_alias in removed_aliases): + yield self.event_creation_handler.create_and_send_nonmember_event( + requester, + { + "type": EventTypes.CanonicalAlias, + "state_key": "", + "room_id": new_room_id, + "sender": requester.user.to_string(), + "content": {"alias": canonical_alias, }, + }, + ratelimit=False + ) + + yield directory_handler.send_room_alias_update_event( + requester, new_room_id, + ) + except SynapseError as e: + # again I'm not really expecting this to fail, but if it does, I'd rather + # we returned the new room to the client at this point. + logger.error( + "Unable to send updated alias events in new room: %s", e, + ) @defer.inlineCallbacks def create_room(self, requester, config, ratelimit=True, -- cgit 1.4.1 From 0f8591a5a8695aa176736c651a361c40cf228b6d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 29 Oct 2018 15:20:19 +0000 Subject: Avoid else clause on exception for clarity --- synapse/handlers/room.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 76811050a6..9ff4656717 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -331,13 +331,12 @@ class RoomCreationHandler(BaseHandler): yield directory_handler.delete_association( requester, alias, send_event=False, ) + removed_aliases.append(alias_str) except SynapseError as e: logger.warning( "Unable to remove alias %s from old room: %s", alias, e, ) - else: - removed_aliases.append(alias_str) # if we didn't find any aliases, or couldn't remove anyway, we can skip the rest # of this. 
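Dropping the else: clause above is behaviour-preserving: delete_association is the only statement in the try body that can raise SynapseError, and list.append never raises it, so appending immediately after the call is equivalent. The pattern in isolation (with stand-in names):

    removed_aliases = []
    for alias_str in aliases:
        try:
            delete(alias_str)                    # may raise SynapseError
            removed_aliases.append(alias_str)    # formerly in the else: clause
        except SynapseError as e:
            logger.warning("Unable to remove alias %s: %s", alias_str, e)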
-- cgit 1.4.1 From a8d41c6aff0e58fc24fae1fe4ae89d28541a63cb Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 31 Oct 2018 13:19:28 -0600 Subject: Include a version query string arg for the consent route --- synapse/handlers/auth.py | 5 ++++- synapse/rest/client/v2_alpha/auth.py | 6 ++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index d143522d9a..85fc1fc525 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -473,7 +473,10 @@ class AuthHandler(BaseHandler): "version": self.hs.config.user_consent_version, "en": { "name": "Privacy Policy", - "url": "%s/_matrix/consent" % (self.hs.config.public_baseurl,), + "url": "%s/_matrix/consent?v=%s" % ( + self.hs.config.public_baseurl, + self.hs.config.user_consent_version, + ), }, }, }, diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index 6f90935b22..a8d8ed6590 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -161,8 +161,9 @@ class AuthRestServlet(RestServlet): html = TERMS_TEMPLATE % { 'session': session, - 'terms_url': "%s/_matrix/consent" % ( + 'terms_url': "%s/_matrix/consent?v=%s" % ( self.hs.config.public_baseurl, + self.hs.config.user_consent_version, ), 'myurl': "%s/auth/%s/fallback/web" % ( CLIENT_V2_ALPHA_PREFIX, LoginType.TERMS @@ -241,8 +242,9 @@ class AuthRestServlet(RestServlet): else: html = TERMS_TEMPLATE % { 'session': session, - 'terms_url': "%s/_matrix/consent" % ( + 'terms_url': "%s/_matrix/consent?v=%s" % ( self.hs.config.public_baseurl, + self.hs.config.user_consent_version, ), 'myurl': "%s/auth/%s/fallback/web" % ( CLIENT_V2_ALPHA_PREFIX, LoginType.TERMS -- cgit 1.4.1 From a8c9faa9a2517f7d733f58bea574e5c147c5b328 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 31 Oct 2018 13:28:08 -0600 Subject: The tests also need a version parameter --- tests/test_terms_auth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py index 06b68f0a72..7deab5266f 100644 --- a/tests/test_terms_auth.py +++ b/tests/test_terms_auth.py @@ -67,7 +67,7 @@ class TermsTestCase(unittest.HomeserverTestCase): "privacy_policy": { "en": { "name": "Privacy Policy", - "url": "https://example.org/_matrix/consent", + "url": "https://example.org/_matrix/consent?v=1.0", }, "version": "1.0" }, -- cgit 1.4.1 From aa98e3889667bbbd4186f25336e1a9a5666113d2 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 1 Nov 2018 21:28:35 +1100 Subject: version --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index 97a57a7ac1..89ea9a9775 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.33.8rc2" +__version__ = "0.33.8" -- cgit 1.4.1 From d0ebe8287196b9a7798503c1dfe174a6920bdc81 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 1 Nov 2018 21:29:11 +1100 Subject: changelog --- CHANGES.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 560b406297..8302610585 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 0.33.8 (2018-11-01) +=========================== + +No significant changes. 
+ + Synapse 0.33.8rc2 (2018-10-31) ============================== -- cgit 1.4.1 From b3dd6fa98199f58804df903195b6c5a1cc4686d7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 1 Nov 2018 11:43:46 +0000 Subject: Add STATE_V2_TEST room version --- synapse/api/constants.py | 7 ++++++- synapse/state/__init__.py | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 5565e516d6..e63b1e8a38 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -102,6 +102,7 @@ class ThirdPartyEntityKind(object): class RoomVersions(object): V1 = "1" VDH_TEST = "vdh-test-version" + STATE_V2_TEST = "state-v2-test" # the version we will give rooms which are created on this server @@ -109,7 +110,11 @@ DEFAULT_ROOM_VERSION = RoomVersions.V1 # vdh-test-version is a placeholder to get room versioning support working and tested # until we have a working v2. -KNOWN_ROOM_VERSIONS = {RoomVersions.V1, RoomVersions.VDH_TEST} +KNOWN_ROOM_VERSIONS = { + RoomVersions.V1, + RoomVersions.VDH_TEST, + RoomVersions.STATE_V2_TEST, +} ServerNoticeMsgType = "m.server_notice" ServerNoticeLimitReached = "m.server_notice.usage_limit_reached" diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 9b40b18d5b..943d5d6bb5 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -607,7 +607,7 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto return v1.resolve_events_with_store( state_sets, event_map, state_res_store.get_events, ) - elif room_version == RoomVersions.VDH_TEST: + elif room_version in (RoomVersions.VDH_TEST, RoomVersions.STATE_V2_TEST): return v2.resolve_events_with_store( state_sets, event_map, state_res_store, ) -- cgit 1.4.1 From 62d683161eb2548cd4e81659519229d24ba48f37 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 1 Nov 2018 11:44:44 +0000 Subject: Newsfile --- changelog.d/4128.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4128.misc diff --git a/changelog.d/4128.misc b/changelog.d/4128.misc new file mode 100644 index 0000000000..76ab4b085c --- /dev/null +++ b/changelog.d/4128.misc @@ -0,0 +1 @@ +Add STATE_V2_TEST room version -- cgit 1.4.1 From 642505abc385afe7849f60c37f8ef99592f8f7b4 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Thu, 1 Nov 2018 16:47:05 -0600 Subject: Fix logic error that prevented guests from seeing the privacy policy --- synapse/rest/consent/consent_resource.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py index 4cadd71d7e..89b82b0591 100644 --- a/synapse/rest/consent/consent_resource.py +++ b/synapse/rest/consent/consent_resource.py @@ -141,7 +141,7 @@ class ConsentResource(Resource): username = parse_string(request, "u", required=False, default="") userhmac = None has_consented = False - public_version = username != "" + public_version = username == "" if not public_version: userhmac = parse_string(request, "h", required=True, encoding=None) -- cgit 1.4.1 From 552f090f62e2f48b6a71aaf547fae7a33e03e935 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Thu, 1 Nov 2018 16:51:11 -0600 Subject: Changelog --- changelog.d/4133.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4133.feature diff --git a/changelog.d/4133.feature b/changelog.d/4133.feature new file mode 100644 index 0000000000..ef5cdaf5ec --- /dev/null +++ b/changelog.d/4133.feature @@ -0,0 +1 @@ +Add `m.login.terms` 
to the registration flow when consent tracking is enabled. **This makes the template arguments conditionally optional on a new `public_version` variable - update your privacy templates to support this.** -- cgit 1.4.1 From 54aec35867d24bafa870e5b437a11b9a0c502658 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 2 Nov 2018 10:29:19 +0000 Subject: Fix None exception in state res v2 --- synapse/state/v2.py | 4 ++ tests/state/test_v2.py | 100 ++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 102 insertions(+), 2 deletions(-) diff --git a/synapse/state/v2.py b/synapse/state/v2.py index 5d06f7e928..dbc9688c56 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -53,6 +53,10 @@ def resolve_events_with_store(state_sets, event_map, state_res_store): logger.debug("Computing conflicted state") + # We use event_map as a cache, so if its None we need to initialize it + if event_map is None: + event_map = {} + # First split up the un/conflicted state unconflicted_state, conflicted_state = _seperate(state_sets) diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py index efd85ebe6c..d67f59b2c7 100644 --- a/tests/state/test_v2.py +++ b/tests/state/test_v2.py @@ -544,8 +544,7 @@ class StateTestCase(unittest.TestCase): state_res_store=TestStateResolutionStore(event_map), ) - self.assertTrue(state_d.called) - state_before = state_d.result + state_before = self.successResultOf(state_d) state_after = dict(state_before) if fake_event.state_key is not None: @@ -599,6 +598,103 @@ class LexicographicalTestCase(unittest.TestCase): self.assertEqual(["o", "l", "n", "m", "p"], res) +class SimpleParamStateTestCase(unittest.TestCase): + def setUp(self): + # We build up a simple DAG. + + event_map = {} + + create_event = FakeEvent( + id="CREATE", + sender=ALICE, + type=EventTypes.Create, + state_key="", + content={"creator": ALICE}, + ).to_event([], []) + event_map[create_event.event_id] = create_event + + alice_member = FakeEvent( + id="IMA", + sender=ALICE, + type=EventTypes.Member, + state_key=ALICE, + content=MEMBERSHIP_CONTENT_JOIN, + ).to_event([create_event.event_id], [create_event.event_id]) + event_map[alice_member.event_id] = alice_member + + join_rules = FakeEvent( + id="IJR", + sender=ALICE, + type=EventTypes.JoinRules, + state_key="", + content={"join_rule": JoinRules.PUBLIC}, + ).to_event( + auth_events=[create_event.event_id, alice_member.event_id], + prev_events=[alice_member.event_id], + ) + event_map[join_rules.event_id] = join_rules + + # Bob and Charlie join at the same time, so there is a fork + bob_member = FakeEvent( + id="IMB", + sender=BOB, + type=EventTypes.Member, + state_key=BOB, + content=MEMBERSHIP_CONTENT_JOIN, + ).to_event( + auth_events=[create_event.event_id, join_rules.event_id], + prev_events=[join_rules.event_id], + ) + event_map[bob_member.event_id] = bob_member + + charlie_member = FakeEvent( + id="IMC", + sender=CHARLIE, + type=EventTypes.Member, + state_key=CHARLIE, + content=MEMBERSHIP_CONTENT_JOIN, + ).to_event( + auth_events=[create_event.event_id, join_rules.event_id], + prev_events=[join_rules.event_id], + ) + event_map[charlie_member.event_id] = charlie_member + + self.event_map = event_map + self.create_event = create_event + self.alice_member = alice_member + self.join_rules = join_rules + self.bob_member = bob_member + self.charlie_member = charlie_member + + self.state_at_bob = { + (e.type, e.state_key): e.event_id + for e in [create_event, alice_member, join_rules, bob_member] + } + + self.state_at_charlie = { + (e.type, 
e.state_key): e.event_id + for e in [create_event, alice_member, join_rules, charlie_member] + } + + self.expected_combined_state = { + (e.type, e.state_key): e.event_id + for e in [create_event, alice_member, join_rules, bob_member, charlie_member] + } + + def test_event_map_none(self): + # Test that we correctly handle passing `None` as the event_map + + state_d = resolve_events_with_store( + [self.state_at_bob, self.state_at_charlie], + event_map=None, + state_res_store=TestStateResolutionStore(self.event_map), + ) + + state = self.successResultOf(state_d) + + self.assert_dict(self.expected_combined_state, state) + + def pairwise(iterable): "s -> (s0,s1), (s1,s2), (s2, s3), ..." a, b = itertools.tee(iterable) -- cgit 1.4.1 From f05d97e28331f83af374b81f0862479d3af34722 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 2 Nov 2018 10:32:06 +0000 Subject: Newsfile --- changelog.d/4135.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4135.bugfix diff --git a/changelog.d/4135.bugfix b/changelog.d/4135.bugfix new file mode 100644 index 0000000000..6879b1c162 --- /dev/null +++ b/changelog.d/4135.bugfix @@ -0,0 +1 @@ +Fix exception when using state res v2 algorithm -- cgit 1.4.1 From 50e328d1e7a61268dbf274f10798ea5994c6c25a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 1 Nov 2018 19:01:29 +0000 Subject: Remove redundant database locks for device list updates We can rely on the application-level per-user linearizer. --- synapse/storage/devices.py | 45 ++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 40 insertions(+), 5 deletions(-) diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index 62497ab63f..bc3f575b1e 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -239,7 +239,19 @@ class DeviceStore(SQLBaseStore): def update_remote_device_list_cache_entry(self, user_id, device_id, content, stream_id): - """Updates a single user's device in the cache. + """Updates a single device in the cache of a remote user's devicelist. + + Note: assumes that we are the only thread that can be updating this user's + device list. + + Args: + user_id (str): User to update device list for + device_id (str): ID of decivice being updated + content (dict): new data on this device + stream_id (int): the version of the device list + + Returns: + Deferred[None] """ return self.runInteraction( "update_remote_device_list_cache_entry", @@ -272,7 +284,11 @@ class DeviceStore(SQLBaseStore): }, values={ "content": json.dumps(content), - } + }, + + # we don't need to lock, because we assume we are the only thread + # updating this user's devices. + lock=False, ) txn.call_after(self._get_cached_user_device.invalidate, (user_id, device_id,)) @@ -289,11 +305,26 @@ class DeviceStore(SQLBaseStore): }, values={ "stream_id": stream_id, - } + }, + + # again, we can assume we are the only thread updating this user's + # extremity. + lock=False, ) def update_remote_device_list_cache(self, user_id, devices, stream_id): - """Replace the cache of the remote user's devices. + """Replace the entire cache of the remote user's devices. + + Note: assumes that we are the only thread that can be updating this user's + device list. 
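+
+        (That assumption is what later allows the upserts below to run with
+        lock=False rather than taking a table lock.)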
+ + Args: + user_id (str): User to update device list for + devices (list[dict]): list of device objects supplied over federation + stream_id (int): the version of the device list + + Returns: + Deferred[None] """ return self.runInteraction( "update_remote_device_list_cache", @@ -338,7 +369,11 @@ class DeviceStore(SQLBaseStore): }, values={ "stream_id": stream_id, - } + }, + + # we don't need to lock, because we can assume we are the only thread + # updating this user's extremity. + lock=False, ) def get_devices_by_remote(self, destination, from_stream_id): -- cgit 1.4.1 From 350f654e7b80ff19d1fdf3861093cef09ccf3ab1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 1 Nov 2018 19:10:33 +0000 Subject: Add unique indexes to a couple of tables The indexes on device_lists_remote_extremeties can be unique, and they therefore should, to ensure that the db remains consistent. --- synapse/storage/devices.py | 49 +++++++++++++++++++++- .../schema/delta/40/device_list_streams.sql | 9 ++-- .../delta/52/device_list_streams_unique_idx.sql | 36 ++++++++++++++++ 3 files changed, 88 insertions(+), 6 deletions(-) create mode 100644 synapse/storage/schema/delta/52/device_list_streams_unique_idx.sql diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index bc3f575b1e..ecdab34e7d 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -22,14 +22,19 @@ from twisted.internet import defer from synapse.api.errors import StoreError from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.storage.background_updates import BackgroundUpdateStore from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList -from ._base import Cache, SQLBaseStore, db_to_json +from ._base import Cache, db_to_json logger = logging.getLogger(__name__) +DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES = ( + "drop_device_list_streams_non_unique_indexes" +) -class DeviceStore(SQLBaseStore): + +class DeviceStore(BackgroundUpdateStore): def __init__(self, db_conn, hs): super(DeviceStore, self).__init__(db_conn, hs) @@ -52,6 +57,30 @@ class DeviceStore(SQLBaseStore): columns=["user_id", "device_id"], ) + # create a unique index on device_lists_remote_cache + self.register_background_index_update( + "device_lists_remote_cache_unique_idx", + index_name="device_lists_remote_cache_unique_id", + table="device_lists_remote_cache", + columns=["user_id", "device_id"], + unique=True, + ) + + # And one on device_lists_remote_extremeties + self.register_background_index_update( + "device_lists_remote_extremeties_unique_idx", + index_name="device_lists_remote_extremeties_unique_idx", + table="device_lists_remote_extremeties", + columns=["user_id"], + unique=True, + ) + + # once they complete, we can remove the old non-unique indexes. 
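+        # (register_background_update_handler wires the update name from
+        # the schema delta to the Python handler defined at the bottom of
+        # this class; the updater invokes that handler in batches until it
+        # signals completion via _end_background_update.)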
+ self.register_background_update_handler( + DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES, + self._drop_device_list_streams_non_unique_indexes, + ) + @defer.inlineCallbacks def store_device(self, user_id, device_id, initial_device_display_name): @@ -757,3 +786,19 @@ class DeviceStore(SQLBaseStore): "_prune_old_outbound_device_pokes", _prune_txn, ) + + @defer.inlineCallbacks + def _drop_device_list_streams_non_unique_indexes(self, progress, batch_size): + def f(conn): + txn = conn.cursor() + txn.execute( + "DROP INDEX IF EXISTS device_lists_remote_cache_id" + ) + txn.execute( + "DROP INDEX IF EXISTS device_lists_remote_extremeties_id" + ) + txn.close() + + yield self.runWithConnection(f) + yield self._end_background_update(DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES) + defer.returnValue(1) diff --git a/synapse/storage/schema/delta/40/device_list_streams.sql b/synapse/storage/schema/delta/40/device_list_streams.sql index 54841b3843..dd6dcb65f1 100644 --- a/synapse/storage/schema/delta/40/device_list_streams.sql +++ b/synapse/storage/schema/delta/40/device_list_streams.sql @@ -20,9 +20,6 @@ CREATE TABLE device_lists_remote_cache ( content TEXT NOT NULL ); -CREATE INDEX device_lists_remote_cache_id ON device_lists_remote_cache(user_id, device_id); - - -- The last update we got for a user. Empty if we're not receiving updates for -- that user. CREATE TABLE device_lists_remote_extremeties ( @@ -30,7 +27,11 @@ CREATE TABLE device_lists_remote_extremeties ( stream_id TEXT NOT NULL ); -CREATE INDEX device_lists_remote_extremeties_id ON device_lists_remote_extremeties(user_id, stream_id); +-- we used to create non-unique indexes on these tables, but as of update 52 we create +-- unique indexes concurrently: +-- +-- CREATE INDEX device_lists_remote_cache_id ON device_lists_remote_cache(user_id, device_id); +-- CREATE INDEX device_lists_remote_extremeties_id ON device_lists_remote_extremeties(user_id, stream_id); -- Stream of device lists updates. Includes both local and remotes diff --git a/synapse/storage/schema/delta/52/device_list_streams_unique_idx.sql b/synapse/storage/schema/delta/52/device_list_streams_unique_idx.sql new file mode 100644 index 0000000000..bfa49e6f92 --- /dev/null +++ b/synapse/storage/schema/delta/52/device_list_streams_unique_idx.sql @@ -0,0 +1,36 @@ +/* Copyright 2018 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- register a background update which will create a unique index on +-- device_lists_remote_cache +INSERT into background_updates (update_name, progress_json) + VALUES ('device_lists_remote_cache_unique_idx', '{}'); + +-- and one on device_lists_remote_extremeties +INSERT into background_updates (update_name, progress_json, depends_on) + VALUES ( + 'device_lists_remote_extremeties_unique_idx', '{}', + + -- doesn't really depend on this, but we need to make sure both happen + -- before we drop the old indexes. + 'device_lists_remote_cache_unique_idx' + ); + +-- once they complete, we can drop the old indexes. 
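+-- (the depends_on column holds this row back until the named update has
+-- completed, so the old indexes are only dropped once both unique indexes
+-- have been built)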
+INSERT into background_updates (update_name, progress_json, depends_on) + VALUES ( + 'drop_device_list_streams_non_unique_indexes', '{}', + 'device_lists_remote_extremeties_unique_idx' + ); -- cgit 1.4.1 From 1cc6671ec4157f363d9c476825693b3ad318ffda Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 1 Nov 2018 19:14:04 +0000 Subject: changelog --- changelog.d/4132.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4132.bugfix diff --git a/changelog.d/4132.bugfix b/changelog.d/4132.bugfix new file mode 100644 index 0000000000..2304a40f05 --- /dev/null +++ b/changelog.d/4132.bugfix @@ -0,0 +1 @@ +Fix table lock of device_lists_remote_cache which could freeze the application \ No newline at end of file -- cgit 1.4.1 From cb7a6b2379e0e0a4ba8043da98e376b45d05b977 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Sat, 3 Nov 2018 00:19:23 +1100 Subject: Fix typing being reset causing infinite syncs (#4127) --- changelog.d/4127.bugfix | 1 + synapse/app/synchrotron.py | 14 ++++ synapse/handlers/typing.py | 14 ++-- tests/rest/client/v2_alpha/test_sync.py | 123 ++++++++++++++++++++++++++++++++ tests/server.py | 8 ++- 5 files changed, 155 insertions(+), 5 deletions(-) create mode 100644 changelog.d/4127.bugfix diff --git a/changelog.d/4127.bugfix b/changelog.d/4127.bugfix new file mode 100644 index 0000000000..0701d2ceaa --- /dev/null +++ b/changelog.d/4127.bugfix @@ -0,0 +1 @@ +If the typing stream ID goes backwards (as on a worker when the master restarts), the worker's typing handler will no longer erroneously report rooms containing new typing events. diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 3926c7f263..0354e82bf8 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -226,7 +226,15 @@ class SynchrotronPresence(object): class SynchrotronTyping(object): def __init__(self, hs): self._latest_room_serial = 0 + self._reset() + + def _reset(self): + """ + Reset the typing handler's data caches. + """ + # map room IDs to serial numbers self._room_serials = {} + # map room IDs to sets of users currently typing self._room_typing = {} def stream_positions(self): @@ -236,6 +244,12 @@ class SynchrotronTyping(object): return {"typing": self._latest_room_serial} def process_replication_rows(self, token, rows): + if self._latest_room_serial > token: + # The master has gone backwards. To prevent inconsistent data, just + # clear everything. + self._reset() + + # Set the latest serial token to whatever the server gave us. self._latest_room_serial = token for row in rows: diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index c610933dd4..a61bbf9392 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -63,11 +63,8 @@ class TypingHandler(object): self._member_typing_until = {} # clock time we expect to stop self._member_last_federation_poke = {} - # map room IDs to serial numbers - self._room_serials = {} self._latest_room_serial = 0 - # map room IDs to sets of users currently typing - self._room_typing = {} + self._reset() # caches which room_ids changed at which serials self._typing_stream_change_cache = StreamChangeCache( @@ -79,6 +76,15 @@ class TypingHandler(object): 5000, ) + def _reset(self): + """ + Reset the typing handler's data caches. 
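+
+        Called on startup; the synchrotron's equivalent is also invoked
+        whenever the replication stream token goes backwards.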
+ """ + # map room IDs to serial numbers + self._room_serials = {} + # map room IDs to sets of users currently typing + self._room_typing = {} + def _handle_timeouts(self): logger.info("Checking for typing timeouts") diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py index 4c30c5f258..99b716f00a 100644 --- a/tests/rest/client/v2_alpha/test_sync.py +++ b/tests/rest/client/v2_alpha/test_sync.py @@ -15,9 +15,11 @@ from mock import Mock +from synapse.rest.client.v1 import admin, login, room from synapse.rest.client.v2_alpha import sync from tests import unittest +from tests.server import TimedOutException class FilterTestCase(unittest.HomeserverTestCase): @@ -65,3 +67,124 @@ class FilterTestCase(unittest.HomeserverTestCase): ["next_batch", "rooms", "account_data", "to_device", "device_lists"] ).issubset(set(channel.json_body.keys())) ) + + +class SyncTypingTests(unittest.HomeserverTestCase): + + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + sync.register_servlets, + ] + user_id = True + hijack_auth = False + + def test_sync_backwards_typing(self): + """ + If the typing serial goes backwards and the typing handler is then reset + (such as when the master restarts and sets the typing serial to 0), we + do not incorrectly return typing information that had a serial greater + than the now-reset serial. + """ + typing_url = "/rooms/%s/typing/%s?access_token=%s" + sync_url = "/sync?timeout=3000000&access_token=%s&since=%s" + + # Register the user who gets notified + user_id = self.register_user("user", "pass") + access_token = self.login("user", "pass") + + # Register the user who sends the message + other_user_id = self.register_user("otheruser", "pass") + other_access_token = self.login("otheruser", "pass") + + # Create a room + room = self.helper.create_room_as(user_id, tok=access_token) + + # Invite the other person + self.helper.invite(room=room, src=user_id, tok=access_token, targ=other_user_id) + + # The other user joins + self.helper.join(room=room, user=other_user_id, tok=other_access_token) + + # The other user sends some messages + self.helper.send(room, body="Hi!", tok=other_access_token) + self.helper.send(room, body="There!", tok=other_access_token) + + # Start typing. + request, channel = self.make_request( + "PUT", + typing_url % (room, other_user_id, other_access_token), + b'{"typing": true, "timeout": 30000}', + ) + self.render(request) + self.assertEquals(200, channel.code) + + request, channel = self.make_request( + "GET", "/sync?access_token=%s" % (access_token,) + ) + self.render(request) + self.assertEquals(200, channel.code) + next_batch = channel.json_body["next_batch"] + + # Stop typing. + request, channel = self.make_request( + "PUT", + typing_url % (room, other_user_id, other_access_token), + b'{"typing": false}', + ) + self.render(request) + self.assertEquals(200, channel.code) + + # Start typing. + request, channel = self.make_request( + "PUT", + typing_url % (room, other_user_id, other_access_token), + b'{"typing": true, "timeout": 30000}', + ) + self.render(request) + self.assertEquals(200, channel.code) + + # Should return immediately + request, channel = self.make_request( + "GET", sync_url % (access_token, next_batch) + ) + self.render(request) + self.assertEquals(200, channel.code) + next_batch = channel.json_body["next_batch"] + + # Reset typing serial back to 0, as if the master had. 
+ typing = self.hs.get_typing_handler() + typing._latest_room_serial = 0 + + # Since it checks the state token, we need some state to update to + # invalidate the stream token. + self.helper.send(room, body="There!", tok=other_access_token) + + request, channel = self.make_request( + "GET", sync_url % (access_token, next_batch) + ) + self.render(request) + self.assertEquals(200, channel.code) + next_batch = channel.json_body["next_batch"] + + # This should time out! But it does not, because our stream token is + # ahead, and therefore it's saying the typing (that we've actually + # already seen) is new, since it's got a token above our new, now-reset + # stream token. + request, channel = self.make_request( + "GET", sync_url % (access_token, next_batch) + ) + self.render(request) + self.assertEquals(200, channel.code) + next_batch = channel.json_body["next_batch"] + + # Clear the typing information, so that it doesn't think everything is + # in the future. + typing._reset() + + # Now it SHOULD fail as it never completes! + request, channel = self.make_request( + "GET", sync_url % (access_token, next_batch) + ) + self.assertRaises(TimedOutException, self.render, request) diff --git a/tests/server.py b/tests/server.py index 819c854448..cc6dbe04ac 100644 --- a/tests/server.py +++ b/tests/server.py @@ -21,6 +21,12 @@ from synapse.util import Clock from tests.utils import setup_test_homeserver as _sth +class TimedOutException(Exception): + """ + A web query timed out. + """ + + @attr.s class FakeChannel(object): """ @@ -153,7 +159,7 @@ def wait_until_result(clock, request, timeout=100): x += 1 if x > timeout: - raise Exception("Timed out waiting for request to finish.") + raise TimedOutException("Timed out waiting for request to finish.") clock.advance(0.1) -- cgit 1.4.1
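A closing note on the device-list commits above: the reason the storage-layer
upserts can pass lock=False is that all writes for a given remote user are
already serialized by an application-level Linearizer. The sketch below is
illustrative only — the `with (yield linearizer.queue(...))` idiom matches the
Linearizer API of this era, but the surrounding class and method names
(RemoteDeviceListUpdater, handle_update) are hypothetical, not taken from any
patch in this series.

    # A minimal sketch, not actual Synapse code: one Linearizer keyed on the
    # remote user_id serializes every device-list write for that user, which
    # is the assumption the lock=False upserts above document.
    from twisted.internet import defer

    from synapse.util.async_helpers import Linearizer


    class RemoteDeviceListUpdater(object):
        def __init__(self, store):
            self.store = store
            self._linearizer = Linearizer(name="remote_device_list_updates")

        @defer.inlineCallbacks
        def handle_update(self, user_id, device_id, content, stream_id):
            # While this context is held, no other request can be writing
            # this user's device rows, so the storage layer need not lock
            # the table.
            with (yield self._linearizer.queue(user_id)):
                yield self.store.update_remote_device_list_cache_entry(
                    user_id, device_id, content, stream_id,
                )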