From 37be52ac34f3ec2e409f2961e428c1f42d25d729 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Tue, 31 Jul 2018 16:29:09 -0600 Subject: limt -> limit --- synapse/federation/transport/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 4529d454af..625b4def9b 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -106,7 +106,7 @@ class TransportLayerClient(object): dest (str) room_id (str) event_tuples (list) - limt (int) + limit (int) Returns: Deferred: Results in a dict received from the remote homeserver. -- cgit 1.5.1 From 495cb100d127212d55a46c177706d732950e70be Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 6 Aug 2018 14:46:17 +0100 Subject: Allow profile changes to happen on workers --- synapse/app/event_creator.py | 8 ++++ synapse/handlers/profile.py | 26 ++++++++++--- synapse/replication/http/__init__.py | 3 +- synapse/replication/http/profile.py | 71 ++++++++++++++++++++++++++++++++++++ synapse/storage/profile.py | 6 ++- 5 files changed, 106 insertions(+), 8 deletions(-) create mode 100644 synapse/replication/http/profile.py (limited to 'synapse') diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index 374f115644..0c56fc777b 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -45,6 +45,11 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto from synapse.replication.slave.storage.room import RoomStore from synapse.replication.slave.storage.transactions import TransactionStore from synapse.replication.tcp.client import ReplicationClientHandler +from synapse.rest.client.v1.profile import ( + ProfileAvatarURLRestServlet, + ProfileDisplaynameRestServlet, + ProfileRestServlet, +) from synapse.rest.client.v1.room import ( JoinRoomAliasServlet, RoomMembershipRestServlet, @@ -101,6 +106,9 @@ class EventCreatorServer(HomeServer): RoomMembershipRestServlet(self).register(resource) RoomStateEventRestServlet(self).register(resource) JoinRoomAliasServlet(self).register(resource) + ProfileAvatarURLRestServlet(self).register(resource) + ProfileDisplaynameRestServlet(self).register(resource) + ProfileRestServlet(self).register(resource) resources.update({ "/_matrix/client/r0": resource, "/_matrix/client/unstable": resource, diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index cb5c6d587e..a3bdb1830f 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -19,6 +19,7 @@ from twisted.internet import defer from synapse.api.errors import AuthError, CodeMessageException, SynapseError from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.replication.http.profile import ReplicationHandleProfileChangeRestServlet from synapse.types import UserID, get_domain_from_id from ._base import BaseHandler @@ -45,6 +46,10 @@ class ProfileHandler(BaseHandler): self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS, ) + self._notify_master_profile_change = ( + ReplicationHandleProfileChangeRestServlet.make_client(hs) + ) + @defer.inlineCallbacks def get_profile(self, user_id): target_user = UserID.from_string(user_id) @@ -147,10 +152,16 @@ class ProfileHandler(BaseHandler): ) if self.hs.config.user_directory_search_all_users: - profile = yield self.store.get_profileinfo(target_user.localpart) - yield self.user_directory_handler.handle_local_profile_change( - 
target_user.to_string(), profile - ) + if self.hs.config.worker_app is None: + profile = yield self.store.get_profileinfo(target_user.localpart) + yield self.user_directory_handler.handle_local_profile_change( + target_user.to_string(), profile + ) + else: + yield self._notify_master_profile_change( + requester=requester, + user_id=target_user.to_string(), + ) yield self._update_join_states(requester, target_user) @@ -196,11 +207,16 @@ class ProfileHandler(BaseHandler): target_user.localpart, new_avatar_url ) - if self.hs.config.user_directory_search_all_users: + if self.hs.config.worker_app is None: profile = yield self.store.get_profileinfo(target_user.localpart) yield self.user_directory_handler.handle_local_profile_change( target_user.to_string(), profile ) + else: + yield self._notify_master_profile_change( + requester=requester, + user_id=target_user.to_string(), + ) yield self._update_join_states(requester, target_user) diff --git a/synapse/replication/http/__init__.py b/synapse/replication/http/__init__.py index 589ee94c66..1fbbdf53e9 100644 --- a/synapse/replication/http/__init__.py +++ b/synapse/replication/http/__init__.py @@ -14,7 +14,7 @@ # limitations under the License. from synapse.http.server import JsonResource -from synapse.replication.http import membership, send_event +from synapse.replication.http import membership, profile, send_event REPLICATION_PREFIX = "/_synapse/replication" @@ -27,3 +27,4 @@ class ReplicationRestResource(JsonResource): def register_servlets(self, hs): send_event.register_servlets(hs, self) membership.register_servlets(hs, self) + profile.register_servlets(hs, self) diff --git a/synapse/replication/http/profile.py b/synapse/replication/http/profile.py new file mode 100644 index 0000000000..c4d54c936f --- /dev/null +++ b/synapse/replication/http/profile.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import logging
+
+from twisted.internet import defer
+
+from synapse.http.servlet import parse_json_object_from_request
+from synapse.replication.http._base import ReplicationEndpoint
+from synapse.types import Requester, UserID
+
+logger = logging.getLogger(__name__)
+
+
+class ReplicationHandleProfileChangeRestServlet(ReplicationEndpoint):
+    NAME = "profile_changed"
+    PATH_ARGS = ("user_id",)
+    POST = True
+
+    def __init__(self, hs):
+        super(ReplicationHandleProfileChangeRestServlet, self).__init__(hs)
+
+        self.user_directory_handler = hs.get_user_directory_handler()
+        self.store = hs.get_datastore()
+        self.clock = hs.get_clock()
+
+    @staticmethod
+    def _serialize_payload(requester, user_id):
+        """
+        Args:
+            requester (Requester)
+            user_id (str)
+        """
+
+        return {
+            "requester": requester.serialize(),
+        }
+
+    @defer.inlineCallbacks
+    def _handle_request(self, request, user_id):
+        content = parse_json_object_from_request(request)
+
+        requester = Requester.deserialize(self.store, content["requester"])
+
+        if requester.user:
+            request.authenticated_entity = requester.user.to_string()
+
+        target_user = UserID.from_string(user_id)
+
+        profile = yield self.store.get_profileinfo(target_user.localpart)
+        yield self.user_directory_handler.handle_local_profile_change(
+            user_id, profile
+        )
+
+        defer.returnValue((200, {}))
+
+
+def register_servlets(hs, http_server):
+    ReplicationHandleProfileChangeRestServlet(hs).register(http_server)
diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py
index 60295da254..246ab836bd 100644
--- a/synapse/storage/profile.py
+++ b/synapse/storage/profile.py
@@ -71,8 +71,6 @@ class ProfileWorkerStore(SQLBaseStore):
             desc="get_from_remote_profile_cache",
         )
 
-
-class ProfileStore(ProfileWorkerStore):
     def create_profile(self, user_localpart):
         return self._simple_insert(
             table="profiles",
@@ -182,3 +180,7 @@ class ProfileStore(ProfileWorkerStore):
 
         if res:
             defer.returnValue(True)
+
+
+class ProfileStore(ProfileWorkerStore):
+    pass
--
cgit 1.5.1
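The pattern in the commit above is worth spelling out: a worker process cannot update the user directory itself, so a single class both describes the HTTP payload (`_serialize_payload`) and implements the master-side handler (`_handle_request`), while `make_client` turns that description into a stub the worker calls as if it were a local function. The sketch below is a simplified, self-contained model of that round trip — plain Python, not Synapse's actual `ReplicationEndpoint` base class; the `post_json` callable standing in for the worker's HTTP client is an assumption of the sketch.

    # Simplified model of the ReplicationEndpoint pattern (illustrative,
    # not Synapse code). One class defines both ends of the call.
    class ProfileChangedEndpoint(object):
        NAME = "profile_changed"
        PATH_ARGS = ("user_id",)

        @staticmethod
        def _serialize_payload(requester, user_id):
            # PATH_ARGS travel in the URL; everything else in the body.
            return {"requester": requester}

        def _handle_request(self, payload, user_id):
            # Master side: do the work locally (e.g. update the user
            # directory) and return an HTTP-style status and body.
            return 200, {}

        @classmethod
        def make_client(cls, post_json):
            # Worker side: build a stub that POSTs to
            # /_synapse/replication/<NAME>/<user_id> on the master.
            def send(requester, user_id):
                path = "/_synapse/replication/%s/%s" % (cls.NAME, user_id)
                return post_json(path, cls._serialize_payload(requester, user_id))
            return send

With this in place the handler's call site needs only one branch: run `handle_local_profile_change` directly on the master, or call the generated stub from a worker — which is exactly the `worker_app is None` check introduced above.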
+ """ + row = yield self._simple_select_one( + table="ratelimit_override", + keyvalues={"user_id": user_id}, + retcols=("messages_per_second", "burst_count"), + allow_none=True, + desc="get_ratelimit_for_user", + ) + + if row: + defer.returnValue(RatelimitOverride( + messages_per_second=row["messages_per_second"], + burst_count=row["burst_count"], + )) + else: + defer.returnValue(None) + class RoomStore(RoomWorkerStore, SearchStore): @@ -469,35 +498,6 @@ class RoomStore(RoomWorkerStore, SearchStore): "get_all_new_public_rooms", get_all_new_public_rooms ) - @cachedInlineCallbacks(max_entries=10000) - def get_ratelimit_for_user(self, user_id): - """Check if there are any overrides for ratelimiting for the given - user - - Args: - user_id (str) - - Returns: - RatelimitOverride if there is an override, else None. If the contents - of RatelimitOverride are None or 0 then ratelimitng has been - disabled for that user entirely. - """ - row = yield self._simple_select_one( - table="ratelimit_override", - keyvalues={"user_id": user_id}, - retcols=("messages_per_second", "burst_count"), - allow_none=True, - desc="get_ratelimit_for_user", - ) - - if row: - defer.returnValue(RatelimitOverride( - messages_per_second=row["messages_per_second"], - burst_count=row["burst_count"], - )) - else: - defer.returnValue(None) - @defer.inlineCallbacks def block_room(self, room_id, user_id): yield self._simple_insert( -- cgit 1.5.1 From a6c813761aa38d9ea0ff6db7303b25dfc7b77712 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 9 Aug 2018 10:41:08 +0100 Subject: Docstrings --- synapse/replication/http/profile.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/replication/http/profile.py b/synapse/replication/http/profile.py index c4d54c936f..aa700c2434 100644 --- a/synapse/replication/http/profile.py +++ b/synapse/replication/http/profile.py @@ -25,9 +25,19 @@ logger = logging.getLogger(__name__) class ReplicationHandleProfileChangeRestServlet(ReplicationEndpoint): + """Notifies that a users profile has changed + + Request format: + + POST /_synapse/replication/profile_changed/:user_id + + { + "requester": ... + } + """ + NAME = "profile_changed" PATH_ARGS = ("user_id",) - POST = True def __init__(self, hs): super(ReplicationHandleProfileChangeRestServlet, self).__init__(hs) -- cgit 1.5.1 From 119451dcd192ff5397a3f9630f14556737e82677 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 7 Aug 2018 15:22:40 +0100 Subject: Refactor state module We split out the actual state resolution algorithm to prepare for having multiple versions. --- synapse/state.py | 866 ---------------------------------------------- synapse/state/__init__.py | 574 ++++++++++++++++++++++++++++++ synapse/state/v1.py | 321 +++++++++++++++++ 3 files changed, 895 insertions(+), 866 deletions(-) delete mode 100644 synapse/state.py create mode 100644 synapse/state/__init__.py create mode 100644 synapse/state/v1.py (limited to 'synapse') diff --git a/synapse/state.py b/synapse/state.py deleted file mode 100644 index e1092b97a9..0000000000 --- a/synapse/state.py +++ /dev/null @@ -1,866 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 

From a6c813761aa38d9ea0ff6db7303b25dfc7b77712 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 9 Aug 2018 10:41:08 +0100
Subject: Docstrings

---
 synapse/replication/http/profile.py | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

(limited to 'synapse')

diff --git a/synapse/replication/http/profile.py b/synapse/replication/http/profile.py
index c4d54c936f..aa700c2434 100644
--- a/synapse/replication/http/profile.py
+++ b/synapse/replication/http/profile.py
@@ -25,9 +25,19 @@ logger = logging.getLogger(__name__)
 
 
 class ReplicationHandleProfileChangeRestServlet(ReplicationEndpoint):
+    """Notifies that a user's profile has changed
+
+    Request format:
+
+        POST /_synapse/replication/profile_changed/:user_id
+
+        {
+            "requester": ...
+        }
+    """
+
     NAME = "profile_changed"
     PATH_ARGS = ("user_id",)
-
     def __init__(self, hs):
         super(ReplicationHandleProfileChangeRestServlet, self).__init__(hs)
--
cgit 1.5.1

From 119451dcd192ff5397a3f9630f14556737e82677 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 7 Aug 2018 15:22:40 +0100
Subject: Refactor state module

We split out the actual state resolution algorithm to prepare for having
multiple versions.

---
 synapse/state.py          | 866 ----------------------------------------
 synapse/state/__init__.py | 574 ++++++++++++++++++++++++++++++
 synapse/state/v1.py       | 321 +++++++++++++++++
 3 files changed, 895 insertions(+), 866 deletions(-)
 delete mode 100644 synapse/state.py
 create mode 100644 synapse/state/__init__.py
 create mode 100644 synapse/state/v1.py

(limited to 'synapse')

diff --git a/synapse/state.py b/synapse/state.py
deleted file mode 100644
index e1092b97a9..0000000000
--- a/synapse/state.py
+++ /dev/null
@@ -1,866 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import hashlib
-import logging
-from collections import namedtuple
-
-from six import iteritems, iterkeys, itervalues
-
-from frozendict import frozendict
-
-from twisted.internet import defer
-
-from synapse import event_auth
-from synapse.api.constants import EventTypes
-from synapse.api.errors import AuthError
-from synapse.events.snapshot import EventContext
-from synapse.util.async import Linearizer
-from synapse.util.caches import CACHE_SIZE_FACTOR
-from synapse.util.caches.expiringcache import ExpiringCache
-from synapse.util.logutils import log_function
-from synapse.util.metrics import Measure
-
-logger = logging.getLogger(__name__)
-
-
-KeyStateTuple = namedtuple("KeyStateTuple", ("context", "type", "state_key"))
-
-
-SIZE_OF_CACHE = int(100000 * CACHE_SIZE_FACTOR)
-EVICTION_TIMEOUT_SECONDS = 60 * 60
-
-
-_NEXT_STATE_ID = 1
-
-POWER_KEY = (EventTypes.PowerLevels, "")
-
-
-def _gen_state_id():
-    global _NEXT_STATE_ID
-    s = "X%d" % (_NEXT_STATE_ID,)
-    _NEXT_STATE_ID += 1
-    return s
-
-
-class _StateCacheEntry(object):
-    __slots__ = ["state", "state_group", "state_id", "prev_group", "delta_ids"]
-
-    def __init__(self, state, state_group, prev_group=None, delta_ids=None):
-        # dict[(str, str), str] map from (type, state_key) to event_id
-        self.state = frozendict(state)
-
-        # the ID of a state group if one and only one is involved.
-        # otherwise, None otherwise?
-        self.state_group = state_group
-
-        self.prev_group = prev_group
-        self.delta_ids = frozendict(delta_ids) if delta_ids is not None else None
-
-        # The `state_id` is a unique ID we generate that can be used as ID for
-        # this collection of state. Usually this would be the same as the
-        # state group, but on worker instances we can't generate a new state
-        # group each time we resolve state, so we generate a separate one that
-        # isn't persisted and is used solely for caches.
-        # `state_id` is either a state_group (and so an int) or a string. This
-        # ensures we don't accidentally persist a state_id as a stateg_group
-        if state_group:
-            self.state_id = state_group
-        else:
-            self.state_id = _gen_state_id()
-
-    def __len__(self):
-        return len(self.state)
-
-
-class StateHandler(object):
-    """Fetches bits of state from the stores, and does state resolution
-    where necessary
-    """
-
-    def __init__(self, hs):
-        self.clock = hs.get_clock()
-        self.store = hs.get_datastore()
-        self.hs = hs
-        self._state_resolution_handler = hs.get_state_resolution_handler()
-
-    def start_caching(self):
-        # TODO: remove this shim
-        self._state_resolution_handler.start_caching()
-
-    @defer.inlineCallbacks
-    def get_current_state(self, room_id, event_type=None, state_key="",
-                          latest_event_ids=None):
-        """ Retrieves the current state for the room. This is done by
-        calling `get_latest_events_in_room` to get the leading edges of the
-        event graph and then resolving any of the state conflicts.
-
-        This is equivalent to getting the state of an event that were to send
-        next before receiving any new events.
-
-        If `event_type` is specified, then the method returns only the one
-        event (or None) with that `event_type` and `state_key`.
- - Returns: - map from (type, state_key) to event - """ - if not latest_event_ids: - latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id) - - logger.debug("calling resolve_state_groups from get_current_state") - ret = yield self.resolve_state_groups_for_events(room_id, latest_event_ids) - state = ret.state - - if event_type: - event_id = state.get((event_type, state_key)) - event = None - if event_id: - event = yield self.store.get_event(event_id, allow_none=True) - defer.returnValue(event) - return - - state_map = yield self.store.get_events(list(state.values()), - get_prev_content=False) - state = { - key: state_map[e_id] for key, e_id in iteritems(state) if e_id in state_map - } - - defer.returnValue(state) - - @defer.inlineCallbacks - def get_current_state_ids(self, room_id, latest_event_ids=None): - """Get the current state, or the state at a set of events, for a room - - Args: - room_id (str): - - latest_event_ids (iterable[str]|None): if given, the forward - extremities to resolve. If None, we look them up from the - database (via a cache) - - Returns: - Deferred[dict[(str, str), str)]]: the state dict, mapping from - (event_type, state_key) -> event_id - """ - if not latest_event_ids: - latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id) - - logger.debug("calling resolve_state_groups from get_current_state_ids") - ret = yield self.resolve_state_groups_for_events(room_id, latest_event_ids) - state = ret.state - - defer.returnValue(state) - - @defer.inlineCallbacks - def get_current_user_in_room(self, room_id, latest_event_ids=None): - if not latest_event_ids: - latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id) - logger.debug("calling resolve_state_groups from get_current_user_in_room") - entry = yield self.resolve_state_groups_for_events(room_id, latest_event_ids) - joined_users = yield self.store.get_joined_users_from_state(room_id, entry) - defer.returnValue(joined_users) - - @defer.inlineCallbacks - def get_current_hosts_in_room(self, room_id, latest_event_ids=None): - if not latest_event_ids: - latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id) - logger.debug("calling resolve_state_groups from get_current_hosts_in_room") - entry = yield self.resolve_state_groups_for_events(room_id, latest_event_ids) - joined_hosts = yield self.store.get_joined_hosts(room_id, entry) - defer.returnValue(joined_hosts) - - @defer.inlineCallbacks - def compute_event_context(self, event, old_state=None): - """Build an EventContext structure for the event. - - This works out what the current state should be for the event, and - generates a new state group if necessary. - - Args: - event (synapse.events.EventBase): - old_state (dict|None): The state at the event if it can't be - calculated from existing events. This is normally only specified - when receiving an event from federation where we don't have the - prev events for, e.g. when backfilling. - Returns: - synapse.events.snapshot.EventContext: - """ - - if event.internal_metadata.is_outlier(): - # If this is an outlier, then we know it shouldn't have any current - # state. Certainly store.get_current_state won't return any, and - # persisting the event won't store the state group. 
- if old_state: - prev_state_ids = { - (s.type, s.state_key): s.event_id for s in old_state - } - if event.is_state(): - current_state_ids = dict(prev_state_ids) - key = (event.type, event.state_key) - current_state_ids[key] = event.event_id - else: - current_state_ids = prev_state_ids - else: - current_state_ids = {} - prev_state_ids = {} - - # We don't store state for outliers, so we don't generate a state - # group for it. - context = EventContext.with_state( - state_group=None, - current_state_ids=current_state_ids, - prev_state_ids=prev_state_ids, - ) - - defer.returnValue(context) - - if old_state: - # We already have the state, so we don't need to calculate it. - # Let's just correctly fill out the context and create a - # new state group for it. - - prev_state_ids = { - (s.type, s.state_key): s.event_id for s in old_state - } - - if event.is_state(): - key = (event.type, event.state_key) - if key in prev_state_ids: - replaces = prev_state_ids[key] - if replaces != event.event_id: # Paranoia check - event.unsigned["replaces_state"] = replaces - current_state_ids = dict(prev_state_ids) - current_state_ids[key] = event.event_id - else: - current_state_ids = prev_state_ids - - state_group = yield self.store.store_state_group( - event.event_id, - event.room_id, - prev_group=None, - delta_ids=None, - current_state_ids=current_state_ids, - ) - - context = EventContext.with_state( - state_group=state_group, - current_state_ids=current_state_ids, - prev_state_ids=prev_state_ids, - ) - - defer.returnValue(context) - - logger.debug("calling resolve_state_groups from compute_event_context") - entry = yield self.resolve_state_groups_for_events( - event.room_id, [e for e, _ in event.prev_events], - ) - - prev_state_ids = entry.state - prev_group = None - delta_ids = None - - if event.is_state(): - # If this is a state event then we need to create a new state - # group for the state after this event. - - key = (event.type, event.state_key) - if key in prev_state_ids: - replaces = prev_state_ids[key] - event.unsigned["replaces_state"] = replaces - - current_state_ids = dict(prev_state_ids) - current_state_ids[key] = event.event_id - - if entry.state_group: - # If the state at the event has a state group assigned then - # we can use that as the prev group - prev_group = entry.state_group - delta_ids = { - key: event.event_id - } - elif entry.prev_group: - # If the state at the event only has a prev group, then we can - # use that as a prev group too. 
- prev_group = entry.prev_group - delta_ids = dict(entry.delta_ids) - delta_ids[key] = event.event_id - - state_group = yield self.store.store_state_group( - event.event_id, - event.room_id, - prev_group=prev_group, - delta_ids=delta_ids, - current_state_ids=current_state_ids, - ) - else: - current_state_ids = prev_state_ids - prev_group = entry.prev_group - delta_ids = entry.delta_ids - - if entry.state_group is None: - entry.state_group = yield self.store.store_state_group( - event.event_id, - event.room_id, - prev_group=entry.prev_group, - delta_ids=entry.delta_ids, - current_state_ids=current_state_ids, - ) - entry.state_id = entry.state_group - - state_group = entry.state_group - - context = EventContext.with_state( - state_group=state_group, - current_state_ids=current_state_ids, - prev_state_ids=prev_state_ids, - prev_group=prev_group, - delta_ids=delta_ids, - ) - - defer.returnValue(context) - - @defer.inlineCallbacks - def resolve_state_groups_for_events(self, room_id, event_ids): - """ Given a list of event_ids this method fetches the state at each - event, resolves conflicts between them and returns them. - - Args: - room_id (str): - event_ids (list[str]): - - Returns: - Deferred[_StateCacheEntry]: resolved state - """ - logger.debug("resolve_state_groups event_ids %s", event_ids) - - # map from state group id to the state in that state group (where - # 'state' is a map from state key to event id) - # dict[int, dict[(str, str), str]] - state_groups_ids = yield self.store.get_state_groups_ids( - room_id, event_ids - ) - - if len(state_groups_ids) == 1: - name, state_list = list(state_groups_ids.items()).pop() - - prev_group, delta_ids = yield self.store.get_state_group_delta(name) - - defer.returnValue(_StateCacheEntry( - state=state_list, - state_group=name, - prev_group=prev_group, - delta_ids=delta_ids, - )) - - result = yield self._state_resolution_handler.resolve_state_groups( - room_id, state_groups_ids, None, self._state_map_factory, - ) - defer.returnValue(result) - - def _state_map_factory(self, ev_ids): - return self.store.get_events( - ev_ids, get_prev_content=False, check_redacted=False, - ) - - def resolve_events(self, state_sets, event): - logger.info( - "Resolving state for %s with %d groups", event.room_id, len(state_sets) - ) - state_set_ids = [{ - (ev.type, ev.state_key): ev.event_id - for ev in st - } for st in state_sets] - - state_map = { - ev.event_id: ev - for st in state_sets - for ev in st - } - - with Measure(self.clock, "state._resolve_events"): - new_state = resolve_events_with_state_map(state_set_ids, state_map) - - new_state = { - key: state_map[ev_id] for key, ev_id in iteritems(new_state) - } - - return new_state - - -class StateResolutionHandler(object): - """Responsible for doing state conflict resolution. - - Note that the storage layer depends on this handler, so all functions must - be storage-independent. - """ - def __init__(self, hs): - self.clock = hs.get_clock() - - # dict of set of event_ids -> _StateCacheEntry. 
- self._state_cache = None - self.resolve_linearizer = Linearizer(name="state_resolve_lock") - - def start_caching(self): - logger.debug("start_caching") - - self._state_cache = ExpiringCache( - cache_name="state_cache", - clock=self.clock, - max_len=SIZE_OF_CACHE, - expiry_ms=EVICTION_TIMEOUT_SECONDS * 1000, - iterable=True, - reset_expiry_on_get=True, - ) - - self._state_cache.start() - - @defer.inlineCallbacks - @log_function - def resolve_state_groups( - self, room_id, state_groups_ids, event_map, state_map_factory, - ): - """Resolves conflicts between a set of state groups - - Always generates a new state group (unless we hit the cache), so should - not be called for a single state group - - Args: - room_id (str): room we are resolving for (used for logging) - state_groups_ids (dict[int, dict[(str, str), str]]): - map from state group id to the state in that state group - (where 'state' is a map from state key to event id) - - event_map(dict[str,FrozenEvent]|None): - a dict from event_id to event, for any events that we happen to - have in flight (eg, those currently being persisted). This will be - used as a starting point fof finding the state we need; any missing - events will be requested via state_map_factory. - - If None, all events will be fetched via state_map_factory. - - Returns: - Deferred[_StateCacheEntry]: resolved state - """ - logger.debug( - "resolve_state_groups state_groups %s", - state_groups_ids.keys() - ) - - group_names = frozenset(state_groups_ids.keys()) - - with (yield self.resolve_linearizer.queue(group_names)): - if self._state_cache is not None: - cache = self._state_cache.get(group_names, None) - if cache: - defer.returnValue(cache) - - logger.info( - "Resolving state for %s with %d groups", room_id, len(state_groups_ids) - ) - - # start by assuming we won't have any conflicted state, and build up the new - # state map by iterating through the state groups. If we discover a conflict, - # we give up and instead use `resolve_events_with_factory`. - # - # XXX: is this actually worthwhile, or should we just let - # resolve_events_with_factory do it? - new_state = {} - conflicted_state = False - for st in itervalues(state_groups_ids): - for key, e_id in iteritems(st): - if key in new_state: - conflicted_state = True - break - new_state[key] = e_id - if conflicted_state: - break - - if conflicted_state: - logger.info("Resolving conflicted state for %r", room_id) - with Measure(self.clock, "state._resolve_events"): - new_state = yield resolve_events_with_factory( - list(itervalues(state_groups_ids)), - event_map=event_map, - state_map_factory=state_map_factory, - ) - - # if the new state matches any of the input state groups, we can - # use that state group again. Otherwise we will generate a state_id - # which will be used as a cache key for future resolutions, but - # not get persisted. - - with Measure(self.clock, "state.create_group_ids"): - cache = _make_state_cache_entry(new_state, state_groups_ids) - - if self._state_cache is not None: - self._state_cache[group_names] = cache - - defer.returnValue(cache) - - -def _make_state_cache_entry( - new_state, - state_groups_ids, -): - """Given a resolved state, and a set of input state groups, pick one to base - a new state group on (if any), and return an appropriately-constructed - _StateCacheEntry. 
- - Args: - new_state (dict[(str, str), str]): resolved state map (mapping from - (type, state_key) to event_id) - - state_groups_ids (dict[int, dict[(str, str), str]]): - map from state group id to the state in that state group - (where 'state' is a map from state key to event id) - - Returns: - _StateCacheEntry - """ - # if the new state matches any of the input state groups, we can - # use that state group again. Otherwise we will generate a state_id - # which will be used as a cache key for future resolutions, but - # not get persisted. - - # first look for exact matches - new_state_event_ids = set(itervalues(new_state)) - for sg, state in iteritems(state_groups_ids): - if len(new_state_event_ids) != len(state): - continue - - old_state_event_ids = set(itervalues(state)) - if new_state_event_ids == old_state_event_ids: - # got an exact match. - return _StateCacheEntry( - state=new_state, - state_group=sg, - ) - - # TODO: We want to create a state group for this set of events, to - # increase cache hits, but we need to make sure that it doesn't - # end up as a prev_group without being added to the database - - # failing that, look for the closest match. - prev_group = None - delta_ids = None - - for old_group, old_state in iteritems(state_groups_ids): - n_delta_ids = { - k: v - for k, v in iteritems(new_state) - if old_state.get(k) != v - } - if not delta_ids or len(n_delta_ids) < len(delta_ids): - prev_group = old_group - delta_ids = n_delta_ids - - return _StateCacheEntry( - state=new_state, - state_group=None, - prev_group=prev_group, - delta_ids=delta_ids, - ) - - -def _ordered_events(events): - def key_func(e): - return -int(e.depth), hashlib.sha1(e.event_id.encode('ascii')).hexdigest() - - return sorted(events, key=key_func) - - -def resolve_events_with_state_map(state_sets, state_map): - """ - Args: - state_sets(list): List of dicts of (type, state_key) -> event_id, - which are the different state groups to resolve. - state_map(dict): a dict from event_id to event, for all events in - state_sets. - - Returns - dict[(str, str), str]: - a map from (type, state_key) to event_id. - """ - if len(state_sets) == 1: - return state_sets[0] - - unconflicted_state, conflicted_state = _seperate( - state_sets, - ) - - auth_events = _create_auth_events_from_maps( - unconflicted_state, conflicted_state, state_map - ) - - return _resolve_with_state( - unconflicted_state, conflicted_state, auth_events, state_map - ) - - -def _seperate(state_sets): - """Takes the state_sets and figures out which keys are conflicted and - which aren't. i.e., which have multiple different event_ids associated - with them in different state sets. - - Args: - state_sets(iterable[dict[(str, str), str]]): - List of dicts of (type, state_key) -> event_id, which are the - different state groups to resolve. - - Returns: - (dict[(str, str), str], dict[(str, str), set[str]]): - A tuple of (unconflicted_state, conflicted_state), where: - - unconflicted_state is a dict mapping (type, state_key)->event_id - for unconflicted state keys. - - conflicted_state is a dict mapping (type, state_key) to a set of - event ids for conflicted state keys. - """ - state_set_iterator = iter(state_sets) - unconflicted_state = dict(next(state_set_iterator)) - conflicted_state = {} - - for state_set in state_set_iterator: - for key, value in iteritems(state_set): - # Check if there is an unconflicted entry for the state key. 
- unconflicted_value = unconflicted_state.get(key) - if unconflicted_value is None: - # There isn't an unconflicted entry so check if there is a - # conflicted entry. - ls = conflicted_state.get(key) - if ls is None: - # There wasn't a conflicted entry so haven't seen this key before. - # Therefore it isn't conflicted yet. - unconflicted_state[key] = value - else: - # This key is already conflicted, add our value to the conflict set. - ls.add(value) - elif unconflicted_value != value: - # If the unconflicted value is not the same as our value then we - # have a new conflict. So move the key from the unconflicted_state - # to the conflicted state. - conflicted_state[key] = {value, unconflicted_value} - unconflicted_state.pop(key, None) - - return unconflicted_state, conflicted_state - - -@defer.inlineCallbacks -def resolve_events_with_factory(state_sets, event_map, state_map_factory): - """ - Args: - state_sets(list): List of dicts of (type, state_key) -> event_id, - which are the different state groups to resolve. - - event_map(dict[str,FrozenEvent]|None): - a dict from event_id to event, for any events that we happen to - have in flight (eg, those currently being persisted). This will be - used as a starting point fof finding the state we need; any missing - events will be requested via state_map_factory. - - If None, all events will be fetched via state_map_factory. - - state_map_factory(func): will be called - with a list of event_ids that are needed, and should return with - a Deferred of dict of event_id to event. - - Returns - Deferred[dict[(str, str), str]]: - a map from (type, state_key) to event_id. - """ - if len(state_sets) == 1: - defer.returnValue(state_sets[0]) - - unconflicted_state, conflicted_state = _seperate( - state_sets, - ) - - needed_events = set( - event_id - for event_ids in itervalues(conflicted_state) - for event_id in event_ids - ) - if event_map is not None: - needed_events -= set(iterkeys(event_map)) - - logger.info("Asking for %d conflicted events", len(needed_events)) - - # dict[str, FrozenEvent]: a map from state event id to event. Only includes - # the state events which are in conflict (and those in event_map) - state_map = yield state_map_factory(needed_events) - if event_map is not None: - state_map.update(event_map) - - # get the ids of the auth events which allow us to authenticate the - # conflicted state, picking only from the unconflicting state. 
- # - # dict[(str, str), str]: a map from state key to event id - auth_events = _create_auth_events_from_maps( - unconflicted_state, conflicted_state, state_map - ) - - new_needed_events = set(itervalues(auth_events)) - new_needed_events -= needed_events - if event_map is not None: - new_needed_events -= set(iterkeys(event_map)) - - logger.info("Asking for %d auth events", len(new_needed_events)) - - state_map_new = yield state_map_factory(new_needed_events) - state_map.update(state_map_new) - - defer.returnValue(_resolve_with_state( - unconflicted_state, conflicted_state, auth_events, state_map - )) - - -def _create_auth_events_from_maps(unconflicted_state, conflicted_state, state_map): - auth_events = {} - for event_ids in itervalues(conflicted_state): - for event_id in event_ids: - if event_id in state_map: - keys = event_auth.auth_types_for_event(state_map[event_id]) - for key in keys: - if key not in auth_events: - event_id = unconflicted_state.get(key, None) - if event_id: - auth_events[key] = event_id - return auth_events - - -def _resolve_with_state(unconflicted_state_ids, conflicted_state_ids, auth_event_ids, - state_map): - conflicted_state = {} - for key, event_ids in iteritems(conflicted_state_ids): - events = [state_map[ev_id] for ev_id in event_ids if ev_id in state_map] - if len(events) > 1: - conflicted_state[key] = events - elif len(events) == 1: - unconflicted_state_ids[key] = events[0].event_id - - auth_events = { - key: state_map[ev_id] - for key, ev_id in iteritems(auth_event_ids) - if ev_id in state_map - } - - try: - resolved_state = _resolve_state_events( - conflicted_state, auth_events - ) - except Exception: - logger.exception("Failed to resolve state") - raise - - new_state = unconflicted_state_ids - for key, event in iteritems(resolved_state): - new_state[key] = event.event_id - - return new_state - - -def _resolve_state_events(conflicted_state, auth_events): - """ This is where we actually decide which of the conflicted state to - use. - - We resolve conflicts in the following order: - 1. power levels - 2. join rules - 3. memberships - 4. other events. 
- """ - resolved_state = {} - if POWER_KEY in conflicted_state: - events = conflicted_state[POWER_KEY] - logger.debug("Resolving conflicted power levels %r", events) - resolved_state[POWER_KEY] = _resolve_auth_events( - events, auth_events) - - auth_events.update(resolved_state) - - for key, events in iteritems(conflicted_state): - if key[0] == EventTypes.JoinRules: - logger.debug("Resolving conflicted join rules %r", events) - resolved_state[key] = _resolve_auth_events( - events, - auth_events - ) - - auth_events.update(resolved_state) - - for key, events in iteritems(conflicted_state): - if key[0] == EventTypes.Member: - logger.debug("Resolving conflicted member lists %r", events) - resolved_state[key] = _resolve_auth_events( - events, - auth_events - ) - - auth_events.update(resolved_state) - - for key, events in iteritems(conflicted_state): - if key not in resolved_state: - logger.debug("Resolving conflicted state %r:%r", key, events) - resolved_state[key] = _resolve_normal_events( - events, auth_events - ) - - return resolved_state - - -def _resolve_auth_events(events, auth_events): - reverse = [i for i in reversed(_ordered_events(events))] - - auth_keys = set( - key - for event in events - for key in event_auth.auth_types_for_event(event) - ) - - new_auth_events = {} - for key in auth_keys: - auth_event = auth_events.get(key, None) - if auth_event: - new_auth_events[key] = auth_event - - auth_events = new_auth_events - - prev_event = reverse[0] - for event in reverse[1:]: - auth_events[(prev_event.type, prev_event.state_key)] = prev_event - try: - # The signatures have already been checked at this point - event_auth.check(event, auth_events, do_sig_check=False, do_size_check=False) - prev_event = event - except AuthError: - return prev_event - - return event - - -def _resolve_normal_events(events, auth_events): - for event in _ordered_events(events): - try: - # The signatures have already been checked at this point - event_auth.check(event, auth_events, do_sig_check=False, do_size_check=False) - return event - except AuthError: - pass - - # Use the last event (the one with the least depth) if they all fail - # the auth check. - return event diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py new file mode 100644 index 0000000000..8c091d07c9 --- /dev/null +++ b/synapse/state/__init__.py @@ -0,0 +1,574 @@ +# -*- coding: utf-8 -*- +# Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import logging
+from collections import namedtuple
+
+from six import iteritems, itervalues
+
+from frozendict import frozendict
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes
+from synapse.events.snapshot import EventContext
+from synapse.util.async import Linearizer
+from synapse.util.caches import CACHE_SIZE_FACTOR
+from synapse.util.caches.expiringcache import ExpiringCache
+from synapse.util.logutils import log_function
+from synapse.util.metrics import Measure
+
+from .v1 import resolve_events_with_factory, resolve_events_with_state_map
+
+logger = logging.getLogger(__name__)
+
+
+KeyStateTuple = namedtuple("KeyStateTuple", ("context", "type", "state_key"))
+
+
+SIZE_OF_CACHE = int(100000 * CACHE_SIZE_FACTOR)
+EVICTION_TIMEOUT_SECONDS = 60 * 60
+
+
+_NEXT_STATE_ID = 1
+
+POWER_KEY = (EventTypes.PowerLevels, "")
+
+
+def _gen_state_id():
+    global _NEXT_STATE_ID
+    s = "X%d" % (_NEXT_STATE_ID,)
+    _NEXT_STATE_ID += 1
+    return s
+
+
+class _StateCacheEntry(object):
+    __slots__ = ["state", "state_group", "state_id", "prev_group", "delta_ids"]
+
+    def __init__(self, state, state_group, prev_group=None, delta_ids=None):
+        # dict[(str, str), str] map from (type, state_key) to event_id
+        self.state = frozendict(state)
+
+        # the ID of a state group if one and only one is involved;
+        # otherwise, None.
+        self.state_group = state_group
+
+        self.prev_group = prev_group
+        self.delta_ids = frozendict(delta_ids) if delta_ids is not None else None
+
+        # The `state_id` is a unique ID we generate that can be used as ID for
+        # this collection of state. Usually this would be the same as the
+        # state group, but on worker instances we can't generate a new state
+        # group each time we resolve state, so we generate a separate one that
+        # isn't persisted and is used solely for caches.
+        # `state_id` is either a state_group (and so an int) or a string. This
+        # ensures we don't accidentally persist a state_id as a state_group
+        if state_group:
+            self.state_id = state_group
+        else:
+            self.state_id = _gen_state_id()
+
+    def __len__(self):
+        return len(self.state)
+
+
+class StateHandler(object):
+    """Fetches bits of state from the stores, and does state resolution
+    where necessary
+    """
+
+    def __init__(self, hs):
+        self.clock = hs.get_clock()
+        self.store = hs.get_datastore()
+        self.hs = hs
+        self._state_resolution_handler = hs.get_state_resolution_handler()
+
+    def start_caching(self):
+        # TODO: remove this shim
+        self._state_resolution_handler.start_caching()
+
+    @defer.inlineCallbacks
+    def get_current_state(self, room_id, event_type=None, state_key="",
+                          latest_event_ids=None):
+        """ Retrieves the current state for the room. This is done by
+        calling `get_latest_events_in_room` to get the leading edges of the
+        event graph and then resolving any of the state conflicts.
+
+        This is equivalent to getting the state of an event that we were to
+        send next, before receiving any new events.
+
+        If `event_type` is specified, then the method returns only the one
+        event (or None) with that `event_type` and `state_key`.
+
+        Returns:
+            map from (type, state_key) to event
+        """
+        if not latest_event_ids:
+            latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id)
+
+        logger.debug("calling resolve_state_groups from get_current_state")
+        ret = yield self.resolve_state_groups_for_events(room_id, latest_event_ids)
+        state = ret.state
+
+        if event_type:
+            event_id = state.get((event_type, state_key))
+            event = None
+            if event_id:
+                event = yield self.store.get_event(event_id, allow_none=True)
+            defer.returnValue(event)
+            return
+
+        state_map = yield self.store.get_events(list(state.values()),
+                                                get_prev_content=False)
+        state = {
+            key: state_map[e_id] for key, e_id in iteritems(state) if e_id in state_map
+        }
+
+        defer.returnValue(state)
+
+    @defer.inlineCallbacks
+    def get_current_state_ids(self, room_id, latest_event_ids=None):
+        """Get the current state, or the state at a set of events, for a room
+
+        Args:
+            room_id (str):
+
+            latest_event_ids (iterable[str]|None): if given, the forward
+                extremities to resolve. If None, we look them up from the
+                database (via a cache)
+
+        Returns:
+            Deferred[dict[(str, str), str]]: the state dict, mapping from
+                (event_type, state_key) -> event_id
+        """
+        if not latest_event_ids:
+            latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id)
+
+        logger.debug("calling resolve_state_groups from get_current_state_ids")
+        ret = yield self.resolve_state_groups_for_events(room_id, latest_event_ids)
+        state = ret.state
+
+        defer.returnValue(state)
+
+    @defer.inlineCallbacks
+    def get_current_user_in_room(self, room_id, latest_event_ids=None):
+        if not latest_event_ids:
+            latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id)
+        logger.debug("calling resolve_state_groups from get_current_user_in_room")
+        entry = yield self.resolve_state_groups_for_events(room_id, latest_event_ids)
+        joined_users = yield self.store.get_joined_users_from_state(room_id, entry)
+        defer.returnValue(joined_users)
+
+    @defer.inlineCallbacks
+    def get_current_hosts_in_room(self, room_id, latest_event_ids=None):
+        if not latest_event_ids:
+            latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id)
+        logger.debug("calling resolve_state_groups from get_current_hosts_in_room")
+        entry = yield self.resolve_state_groups_for_events(room_id, latest_event_ids)
+        joined_hosts = yield self.store.get_joined_hosts(room_id, entry)
+        defer.returnValue(joined_hosts)
+
+    @defer.inlineCallbacks
+    def compute_event_context(self, event, old_state=None):
+        """Build an EventContext structure for the event.
+
+        This works out what the current state should be for the event, and
+        generates a new state group if necessary.
+
+        Args:
+            event (synapse.events.EventBase):
+            old_state (dict|None): The state at the event if it can't be
+                calculated from existing events. This is normally only specified
+                when receiving an event from federation where we don't have the
+                prev events for, e.g. when backfilling.
+        Returns:
+            synapse.events.snapshot.EventContext:
+        """
+
+        if event.internal_metadata.is_outlier():
+            # If this is an outlier, then we know it shouldn't have any current
+            # state. Certainly store.get_current_state won't return any, and
+            # persisting the event won't store the state group.
+ if old_state: + prev_state_ids = { + (s.type, s.state_key): s.event_id for s in old_state + } + if event.is_state(): + current_state_ids = dict(prev_state_ids) + key = (event.type, event.state_key) + current_state_ids[key] = event.event_id + else: + current_state_ids = prev_state_ids + else: + current_state_ids = {} + prev_state_ids = {} + + # We don't store state for outliers, so we don't generate a state + # group for it. + context = EventContext.with_state( + state_group=None, + current_state_ids=current_state_ids, + prev_state_ids=prev_state_ids, + ) + + defer.returnValue(context) + + if old_state: + # We already have the state, so we don't need to calculate it. + # Let's just correctly fill out the context and create a + # new state group for it. + + prev_state_ids = { + (s.type, s.state_key): s.event_id for s in old_state + } + + if event.is_state(): + key = (event.type, event.state_key) + if key in prev_state_ids: + replaces = prev_state_ids[key] + if replaces != event.event_id: # Paranoia check + event.unsigned["replaces_state"] = replaces + current_state_ids = dict(prev_state_ids) + current_state_ids[key] = event.event_id + else: + current_state_ids = prev_state_ids + + state_group = yield self.store.store_state_group( + event.event_id, + event.room_id, + prev_group=None, + delta_ids=None, + current_state_ids=current_state_ids, + ) + + context = EventContext.with_state( + state_group=state_group, + current_state_ids=current_state_ids, + prev_state_ids=prev_state_ids, + ) + + defer.returnValue(context) + + logger.debug("calling resolve_state_groups from compute_event_context") + entry = yield self.resolve_state_groups_for_events( + event.room_id, [e for e, _ in event.prev_events], + ) + + prev_state_ids = entry.state + prev_group = None + delta_ids = None + + if event.is_state(): + # If this is a state event then we need to create a new state + # group for the state after this event. + + key = (event.type, event.state_key) + if key in prev_state_ids: + replaces = prev_state_ids[key] + event.unsigned["replaces_state"] = replaces + + current_state_ids = dict(prev_state_ids) + current_state_ids[key] = event.event_id + + if entry.state_group: + # If the state at the event has a state group assigned then + # we can use that as the prev group + prev_group = entry.state_group + delta_ids = { + key: event.event_id + } + elif entry.prev_group: + # If the state at the event only has a prev group, then we can + # use that as a prev group too. 
+ prev_group = entry.prev_group + delta_ids = dict(entry.delta_ids) + delta_ids[key] = event.event_id + + state_group = yield self.store.store_state_group( + event.event_id, + event.room_id, + prev_group=prev_group, + delta_ids=delta_ids, + current_state_ids=current_state_ids, + ) + else: + current_state_ids = prev_state_ids + prev_group = entry.prev_group + delta_ids = entry.delta_ids + + if entry.state_group is None: + entry.state_group = yield self.store.store_state_group( + event.event_id, + event.room_id, + prev_group=entry.prev_group, + delta_ids=entry.delta_ids, + current_state_ids=current_state_ids, + ) + entry.state_id = entry.state_group + + state_group = entry.state_group + + context = EventContext.with_state( + state_group=state_group, + current_state_ids=current_state_ids, + prev_state_ids=prev_state_ids, + prev_group=prev_group, + delta_ids=delta_ids, + ) + + defer.returnValue(context) + + @defer.inlineCallbacks + def resolve_state_groups_for_events(self, room_id, event_ids): + """ Given a list of event_ids this method fetches the state at each + event, resolves conflicts between them and returns them. + + Args: + room_id (str): + event_ids (list[str]): + + Returns: + Deferred[_StateCacheEntry]: resolved state + """ + logger.debug("resolve_state_groups event_ids %s", event_ids) + + # map from state group id to the state in that state group (where + # 'state' is a map from state key to event id) + # dict[int, dict[(str, str), str]] + state_groups_ids = yield self.store.get_state_groups_ids( + room_id, event_ids + ) + + if len(state_groups_ids) == 1: + name, state_list = list(state_groups_ids.items()).pop() + + prev_group, delta_ids = yield self.store.get_state_group_delta(name) + + defer.returnValue(_StateCacheEntry( + state=state_list, + state_group=name, + prev_group=prev_group, + delta_ids=delta_ids, + )) + + result = yield self._state_resolution_handler.resolve_state_groups( + room_id, state_groups_ids, None, self._state_map_factory, + ) + defer.returnValue(result) + + def _state_map_factory(self, ev_ids): + return self.store.get_events( + ev_ids, get_prev_content=False, check_redacted=False, + ) + + def resolve_events(self, state_sets, event): + logger.info( + "Resolving state for %s with %d groups", event.room_id, len(state_sets) + ) + state_set_ids = [{ + (ev.type, ev.state_key): ev.event_id + for ev in st + } for st in state_sets] + + state_map = { + ev.event_id: ev + for st in state_sets + for ev in st + } + + with Measure(self.clock, "state._resolve_events"): + new_state = resolve_events_with_state_map(state_set_ids, state_map) + + new_state = { + key: state_map[ev_id] for key, ev_id in iteritems(new_state) + } + + return new_state + + +class StateResolutionHandler(object): + """Responsible for doing state conflict resolution. + + Note that the storage layer depends on this handler, so all functions must + be storage-independent. + """ + def __init__(self, hs): + self.clock = hs.get_clock() + + # dict of set of event_ids -> _StateCacheEntry. 
+        self._state_cache = None
+        self.resolve_linearizer = Linearizer(name="state_resolve_lock")
+
+    def start_caching(self):
+        logger.debug("start_caching")
+
+        self._state_cache = ExpiringCache(
+            cache_name="state_cache",
+            clock=self.clock,
+            max_len=SIZE_OF_CACHE,
+            expiry_ms=EVICTION_TIMEOUT_SECONDS * 1000,
+            iterable=True,
+            reset_expiry_on_get=True,
+        )
+
+        self._state_cache.start()
+
+    @defer.inlineCallbacks
+    @log_function
+    def resolve_state_groups(
+        self, room_id, state_groups_ids, event_map, state_map_factory,
+    ):
+        """Resolves conflicts between a set of state groups
+
+        Always generates a new state group (unless we hit the cache), so should
+        not be called for a single state group
+
+        Args:
+            room_id (str): room we are resolving for (used for logging)
+            state_groups_ids (dict[int, dict[(str, str), str]]):
+                map from state group id to the state in that state group
+                (where 'state' is a map from state key to event id)
+
+            event_map(dict[str,FrozenEvent]|None):
+                a dict from event_id to event, for any events that we happen to
+                have in flight (eg, those currently being persisted). This will be
+                used as a starting point for finding the state we need; any missing
+                events will be requested via state_map_factory.
+
+                If None, all events will be fetched via state_map_factory.
+
+        Returns:
+            Deferred[_StateCacheEntry]: resolved state
+        """
+        logger.debug(
+            "resolve_state_groups state_groups %s",
+            state_groups_ids.keys()
+        )
+
+        group_names = frozenset(state_groups_ids.keys())
+
+        with (yield self.resolve_linearizer.queue(group_names)):
+            if self._state_cache is not None:
+                cache = self._state_cache.get(group_names, None)
+                if cache:
+                    defer.returnValue(cache)
+
+            logger.info(
+                "Resolving state for %s with %d groups", room_id, len(state_groups_ids)
+            )
+
+            # start by assuming we won't have any conflicted state, and build up the new
+            # state map by iterating through the state groups. If we discover a conflict,
+            # we give up and instead use `resolve_events_with_factory`.
+            #
+            # XXX: is this actually worthwhile, or should we just let
+            # resolve_events_with_factory do it?
+            new_state = {}
+            conflicted_state = False
+            for st in itervalues(state_groups_ids):
+                for key, e_id in iteritems(st):
+                    if key in new_state:
+                        conflicted_state = True
+                        break
+                    new_state[key] = e_id
+                if conflicted_state:
+                    break
+
+            if conflicted_state:
+                logger.info("Resolving conflicted state for %r", room_id)
+                with Measure(self.clock, "state._resolve_events"):
+                    new_state = yield resolve_events_with_factory(
+                        list(itervalues(state_groups_ids)),
+                        event_map=event_map,
+                        state_map_factory=state_map_factory,
+                    )
+
+            # if the new state matches any of the input state groups, we can
+            # use that state group again. Otherwise we will generate a state_id
+            # which will be used as a cache key for future resolutions, but
+            # not get persisted.
+
+            with Measure(self.clock, "state.create_group_ids"):
+                cache = _make_state_cache_entry(new_state, state_groups_ids)
+
+            if self._state_cache is not None:
+                self._state_cache[group_names] = cache
+
+            defer.returnValue(cache)
+
+
+def _make_state_cache_entry(
+    new_state,
+    state_groups_ids,
+):
+    """Given a resolved state, and a set of input state groups, pick one to base
+    a new state group on (if any), and return an appropriately-constructed
+    _StateCacheEntry.
+ + Args: + new_state (dict[(str, str), str]): resolved state map (mapping from + (type, state_key) to event_id) + + state_groups_ids (dict[int, dict[(str, str), str]]): + map from state group id to the state in that state group + (where 'state' is a map from state key to event id) + + Returns: + _StateCacheEntry + """ + # if the new state matches any of the input state groups, we can + # use that state group again. Otherwise we will generate a state_id + # which will be used as a cache key for future resolutions, but + # not get persisted. + + # first look for exact matches + new_state_event_ids = set(itervalues(new_state)) + for sg, state in iteritems(state_groups_ids): + if len(new_state_event_ids) != len(state): + continue + + old_state_event_ids = set(itervalues(state)) + if new_state_event_ids == old_state_event_ids: + # got an exact match. + return _StateCacheEntry( + state=new_state, + state_group=sg, + ) + + # TODO: We want to create a state group for this set of events, to + # increase cache hits, but we need to make sure that it doesn't + # end up as a prev_group without being added to the database + + # failing that, look for the closest match. + prev_group = None + delta_ids = None + + for old_group, old_state in iteritems(state_groups_ids): + n_delta_ids = { + k: v + for k, v in iteritems(new_state) + if old_state.get(k) != v + } + if not delta_ids or len(n_delta_ids) < len(delta_ids): + prev_group = old_group + delta_ids = n_delta_ids + + return _StateCacheEntry( + state=new_state, + state_group=None, + prev_group=prev_group, + delta_ids=delta_ids, + ) diff --git a/synapse/state/v1.py b/synapse/state/v1.py new file mode 100644 index 0000000000..3a1f7054a1 --- /dev/null +++ b/synapse/state/v1.py @@ -0,0 +1,321 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import hashlib +import logging + +from six import iteritems, iterkeys, itervalues + +from twisted.internet import defer + +from synapse import event_auth +from synapse.api.constants import EventTypes +from synapse.api.errors import AuthError + +logger = logging.getLogger(__name__) + + +POWER_KEY = (EventTypes.PowerLevels, "") + + +def resolve_events_with_state_map(state_sets, state_map): + """ + Args: + state_sets(list): List of dicts of (type, state_key) -> event_id, + which are the different state groups to resolve. + state_map(dict): a dict from event_id to event, for all events in + state_sets. + + Returns + dict[(str, str), str]: + a map from (type, state_key) to event_id. 
+ """ + if len(state_sets) == 1: + return state_sets[0] + + unconflicted_state, conflicted_state = _seperate( + state_sets, + ) + + auth_events = _create_auth_events_from_maps( + unconflicted_state, conflicted_state, state_map + ) + + return _resolve_with_state( + unconflicted_state, conflicted_state, auth_events, state_map + ) + + +@defer.inlineCallbacks +def resolve_events_with_factory(state_sets, event_map, state_map_factory): + """ + Args: + state_sets(list): List of dicts of (type, state_key) -> event_id, + which are the different state groups to resolve. + + event_map(dict[str,FrozenEvent]|None): + a dict from event_id to event, for any events that we happen to + have in flight (eg, those currently being persisted). This will be + used as a starting point fof finding the state we need; any missing + events will be requested via state_map_factory. + + If None, all events will be fetched via state_map_factory. + + state_map_factory(func): will be called + with a list of event_ids that are needed, and should return with + a Deferred of dict of event_id to event. + + Returns + Deferred[dict[(str, str), str]]: + a map from (type, state_key) to event_id. + """ + if len(state_sets) == 1: + defer.returnValue(state_sets[0]) + + unconflicted_state, conflicted_state = _seperate( + state_sets, + ) + + needed_events = set( + event_id + for event_ids in itervalues(conflicted_state) + for event_id in event_ids + ) + if event_map is not None: + needed_events -= set(iterkeys(event_map)) + + logger.info("Asking for %d conflicted events", len(needed_events)) + + # dict[str, FrozenEvent]: a map from state event id to event. Only includes + # the state events which are in conflict (and those in event_map) + state_map = yield state_map_factory(needed_events) + if event_map is not None: + state_map.update(event_map) + + # get the ids of the auth events which allow us to authenticate the + # conflicted state, picking only from the unconflicting state. + # + # dict[(str, str), str]: a map from state key to event id + auth_events = _create_auth_events_from_maps( + unconflicted_state, conflicted_state, state_map + ) + + new_needed_events = set(itervalues(auth_events)) + new_needed_events -= needed_events + if event_map is not None: + new_needed_events -= set(iterkeys(event_map)) + + logger.info("Asking for %d auth events", len(new_needed_events)) + + state_map_new = yield state_map_factory(new_needed_events) + state_map.update(state_map_new) + + defer.returnValue(_resolve_with_state( + unconflicted_state, conflicted_state, auth_events, state_map + )) + + +def _seperate(state_sets): + """Takes the state_sets and figures out which keys are conflicted and + which aren't. i.e., which have multiple different event_ids associated + with them in different state sets. + + Args: + state_sets(iterable[dict[(str, str), str]]): + List of dicts of (type, state_key) -> event_id, which are the + different state groups to resolve. + + Returns: + (dict[(str, str), str], dict[(str, str), set[str]]): + A tuple of (unconflicted_state, conflicted_state), where: + + unconflicted_state is a dict mapping (type, state_key)->event_id + for unconflicted state keys. + + conflicted_state is a dict mapping (type, state_key) to a set of + event ids for conflicted state keys. + """ + state_set_iterator = iter(state_sets) + unconflicted_state = dict(next(state_set_iterator)) + conflicted_state = {} + + for state_set in state_set_iterator: + for key, value in iteritems(state_set): + # Check if there is an unconflicted entry for the state key. 
+ unconflicted_value = unconflicted_state.get(key) + if unconflicted_value is None: + # There isn't an unconflicted entry so check if there is a + # conflicted entry. + ls = conflicted_state.get(key) + if ls is None: + # There wasn't a conflicted entry so haven't seen this key before. + # Therefore it isn't conflicted yet. + unconflicted_state[key] = value + else: + # This key is already conflicted, add our value to the conflict set. + ls.add(value) + elif unconflicted_value != value: + # If the unconflicted value is not the same as our value then we + # have a new conflict. So move the key from the unconflicted_state + # to the conflicted state. + conflicted_state[key] = {value, unconflicted_value} + unconflicted_state.pop(key, None) + + return unconflicted_state, conflicted_state + + +def _create_auth_events_from_maps(unconflicted_state, conflicted_state, state_map): + auth_events = {} + for event_ids in itervalues(conflicted_state): + for event_id in event_ids: + if event_id in state_map: + keys = event_auth.auth_types_for_event(state_map[event_id]) + for key in keys: + if key not in auth_events: + event_id = unconflicted_state.get(key, None) + if event_id: + auth_events[key] = event_id + return auth_events + + +def _resolve_with_state(unconflicted_state_ids, conflicted_state_ids, auth_event_ids, + state_map): + conflicted_state = {} + for key, event_ids in iteritems(conflicted_state_ids): + events = [state_map[ev_id] for ev_id in event_ids if ev_id in state_map] + if len(events) > 1: + conflicted_state[key] = events + elif len(events) == 1: + unconflicted_state_ids[key] = events[0].event_id + + auth_events = { + key: state_map[ev_id] + for key, ev_id in iteritems(auth_event_ids) + if ev_id in state_map + } + + try: + resolved_state = _resolve_state_events( + conflicted_state, auth_events + ) + except Exception: + logger.exception("Failed to resolve state") + raise + + new_state = unconflicted_state_ids + for key, event in iteritems(resolved_state): + new_state[key] = event.event_id + + return new_state + + +def _resolve_state_events(conflicted_state, auth_events): + """ This is where we actually decide which of the conflicted state to + use. + + We resolve conflicts in the following order: + 1. power levels + 2. join rules + 3. memberships + 4. other events. 
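    A compressed sketch of that ordering (assuming POWER_KEY and
    EventTypes are in scope as defined in this module):

        passes = [
            lambda key: key == POWER_KEY,                # 1. power levels
            lambda key: key[0] == EventTypes.JoinRules,  # 2. join rules
            lambda key: key[0] == EventTypes.Member,     # 3. memberships
            lambda key: True,                            # 4. everything else
        ]

    Each pass feeds its winners back into auth_events, so later rounds
    are authed against earlier winners.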
+ """ + resolved_state = {} + if POWER_KEY in conflicted_state: + events = conflicted_state[POWER_KEY] + logger.debug("Resolving conflicted power levels %r", events) + resolved_state[POWER_KEY] = _resolve_auth_events( + events, auth_events) + + auth_events.update(resolved_state) + + for key, events in iteritems(conflicted_state): + if key[0] == EventTypes.JoinRules: + logger.debug("Resolving conflicted join rules %r", events) + resolved_state[key] = _resolve_auth_events( + events, + auth_events + ) + + auth_events.update(resolved_state) + + for key, events in iteritems(conflicted_state): + if key[0] == EventTypes.Member: + logger.debug("Resolving conflicted member lists %r", events) + resolved_state[key] = _resolve_auth_events( + events, + auth_events + ) + + auth_events.update(resolved_state) + + for key, events in iteritems(conflicted_state): + if key not in resolved_state: + logger.debug("Resolving conflicted state %r:%r", key, events) + resolved_state[key] = _resolve_normal_events( + events, auth_events + ) + + return resolved_state + + +def _resolve_auth_events(events, auth_events): + reverse = [i for i in reversed(_ordered_events(events))] + + auth_keys = set( + key + for event in events + for key in event_auth.auth_types_for_event(event) + ) + + new_auth_events = {} + for key in auth_keys: + auth_event = auth_events.get(key, None) + if auth_event: + new_auth_events[key] = auth_event + + auth_events = new_auth_events + + prev_event = reverse[0] + for event in reverse[1:]: + auth_events[(prev_event.type, prev_event.state_key)] = prev_event + try: + # The signatures have already been checked at this point + event_auth.check(event, auth_events, do_sig_check=False, do_size_check=False) + prev_event = event + except AuthError: + return prev_event + + return event + + +def _resolve_normal_events(events, auth_events): + for event in _ordered_events(events): + try: + # The signatures have already been checked at this point + event_auth.check(event, auth_events, do_sig_check=False, do_size_check=False) + return event + except AuthError: + pass + + # Use the last event (the one with the least depth) if they all fail + # the auth check. + return event + + +def _ordered_events(events): + def key_func(e): + return -int(e.depth), hashlib.sha1(e.event_id.encode('ascii')).hexdigest() + + return sorted(events, key=key_func) -- cgit 1.5.1 From 152c0aa58edd1453f6c8c16a82cdb92165d0adba Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 7 Aug 2018 15:27:19 +0100 Subject: Add constants for room versions --- synapse/api/constants.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/api/constants.py b/synapse/api/constants.py index b0da506f6d..912bf024bf 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -97,9 +97,14 @@ class ThirdPartyEntityKind(object): LOCATION = "location" +class RoomVersions(object): + V1 = "1" + VDH_TEST = "vdh-test-version" + + # the version we will give rooms which are created on this server -DEFAULT_ROOM_VERSION = "1" +DEFAULT_ROOM_VERSION = RoomVersions.V1 # vdh-test-version is a placeholder to get room versioning support working and tested # until we have a working v2. 
-KNOWN_ROOM_VERSIONS = {"1", "vdh-test-version"} +KNOWN_ROOM_VERSIONS = {RoomVersions.V1, RoomVersions.VDH_TEST} -- cgit 1.5.1 From ce6db0e5473e31292567b11599eb334c3275c564 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 8 Aug 2018 17:01:57 +0100 Subject: Choose state algorithm based on room version --- synapse/handlers/federation.py | 8 +++- synapse/handlers/room_member.py | 5 +- synapse/state/__init__.py | 104 +++++++++++++++++++++++++++++++++++----- synapse/storage/events.py | 4 +- 4 files changed, 105 insertions(+), 16 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 0dffd44e22..75a819dd11 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -274,8 +274,9 @@ class FederationHandler(BaseHandler): ev_ids, get_prev_content=False, check_redacted=False ) + room_version = yield self.store.get_room_version(pdu.room_id) state_map = yield resolve_events_with_factory( - state_groups, {pdu.event_id: pdu}, fetch + room_version, state_groups, {pdu.event_id: pdu}, fetch ) state = (yield self.store.get_events(state_map.values())).values() @@ -1811,7 +1812,10 @@ class FederationHandler(BaseHandler): (d.type, d.state_key): d for d in different_events if d }) - new_state = self.state_handler.resolve_events( + room_version = yield self.store.get_room_version(event.room_id) + + new_state = yield self.state_handler.resolve_events( + room_version, [list(local_view.values()), list(remote_view.values())], event ) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 0d4a3f4677..b6010bb41e 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -341,9 +341,10 @@ class RoomMemberHandler(object): prev_events_and_hashes = yield self.store.get_prev_events_for_room( room_id, ) - latest_event_ids = ( + latest_event_ids = [ event_id for (event_id, _, _) in prev_events_and_hashes - ) + ] + current_state_ids = yield self.state_handler.get_current_state_ids( room_id, latest_event_ids=latest_event_ids, ) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 8c091d07c9..222daa0b28 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -23,16 +23,15 @@ from frozendict import frozendict from twisted.internet import defer -from synapse.api.constants import EventTypes +from synapse.api.constants import EventTypes, RoomVersions from synapse.events.snapshot import EventContext +from synapse.state import v1 from synapse.util.async import Linearizer from synapse.util.caches import CACHE_SIZE_FACTOR from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.logutils import log_function from synapse.util.metrics import Measure -from .v1 import resolve_events_with_factory, resolve_events_with_state_map - logger = logging.getLogger(__name__) @@ -263,8 +262,14 @@ class StateHandler(object): defer.returnValue(context) logger.debug("calling resolve_state_groups from compute_event_context") + if event.type == EventTypes.Create: + room_version = event.content.get("room_version", RoomVersions.V1) + else: + room_version = None + entry = yield self.resolve_state_groups_for_events( event.room_id, [e for e, _ in event.prev_events], + explicit_room_version=room_version, ) prev_state_ids = entry.state @@ -332,13 +337,17 @@ class StateHandler(object): defer.returnValue(context) @defer.inlineCallbacks - def resolve_state_groups_for_events(self, room_id, event_ids): + def resolve_state_groups_for_events(self, room_id, 
event_ids, + explicit_room_version=None): """ Given a list of event_ids this method fetches the state at each event, resolves conflicts between them and returns them. Args: - room_id (str): - event_ids (list[str]): + room_id (str) + event_ids (list[str]) + explicit_room_version (str|None): If set uses the the given room + version to choose the resolution algorithm. If None, then + checks the database for room version. Returns: Deferred[_StateCacheEntry]: resolved state @@ -364,8 +373,13 @@ class StateHandler(object): delta_ids=delta_ids, )) + room_version = explicit_room_version + if not room_version: + room_version = yield self.store.get_room_version(room_id) + result = yield self._state_resolution_handler.resolve_state_groups( - room_id, state_groups_ids, None, self._state_map_factory, + room_id, room_version, state_groups_ids, None, + self._state_map_factory, ) defer.returnValue(result) @@ -374,7 +388,8 @@ class StateHandler(object): ev_ids, get_prev_content=False, check_redacted=False, ) - def resolve_events(self, state_sets, event): + @defer.inlineCallbacks + def resolve_events(self, room_version, state_sets, event): logger.info( "Resolving state for %s with %d groups", event.room_id, len(state_sets) ) @@ -389,14 +404,18 @@ class StateHandler(object): for ev in st } + room_version = yield self.store.get_room_version(event.room_id) + with Measure(self.clock, "state._resolve_events"): - new_state = resolve_events_with_state_map(state_set_ids, state_map) + new_state = resolve_events_with_state_map( + room_version, state_set_ids, state_map, + ) new_state = { key: state_map[ev_id] for key, ev_id in iteritems(new_state) } - return new_state + defer.returnValue(new_state) class StateResolutionHandler(object): @@ -429,7 +448,7 @@ class StateResolutionHandler(object): @defer.inlineCallbacks @log_function def resolve_state_groups( - self, room_id, state_groups_ids, event_map, state_map_factory, + self, room_id, room_version, state_groups_ids, event_map, state_map_factory, ): """Resolves conflicts between a set of state groups @@ -438,6 +457,7 @@ class StateResolutionHandler(object): Args: room_id (str): room we are resolving for (used for logging) + room_version (str): version of the room state_groups_ids (dict[int, dict[(str, str), str]]): map from state group id to the state in that state group (where 'state' is a map from state key to event id) @@ -491,6 +511,7 @@ class StateResolutionHandler(object): logger.info("Resolving conflicted state for %r", room_id) with Measure(self.clock, "state._resolve_events"): new_state = yield resolve_events_with_factory( + room_version, list(itervalues(state_groups_ids)), event_map=event_map, state_map_factory=state_map_factory, @@ -572,3 +593,64 @@ def _make_state_cache_entry( prev_group=prev_group, delta_ids=delta_ids, ) + + +def resolve_events_with_state_map(room_version, state_sets, state_map): + """ + Args: + room_version(str): Version of the room + state_sets(list): List of dicts of (type, state_key) -> event_id, + which are the different state groups to resolve. + state_map(dict): a dict from event_id to event, for all events in + state_sets. + + Returns + dict[(str, str), str]: + a map from (type, state_key) to event_id. + """ + if room_version in (RoomVersions.V1, RoomVersions.VDH_TEST,): + return v1.resolve_events_with_state_map( + state_sets, state_map, + ) + else: + # This should only happen if we added a version but forgot to add it to + # the list above. 
+ raise Exception( + "No state resolution algorithm defined for version %r" % (room_version,) + ) + + +def resolve_events_with_factory(room_version, state_sets, event_map, state_map_factory): + """ + Args: + room_version(str): Version of the room + + state_sets(list): List of dicts of (type, state_key) -> event_id, + which are the different state groups to resolve. + + event_map(dict[str,FrozenEvent]|None): + a dict from event_id to event, for any events that we happen to + have in flight (eg, those currently being persisted). This will be + used as a starting point fof finding the state we need; any missing + events will be requested via state_map_factory. + + If None, all events will be fetched via state_map_factory. + + state_map_factory(func): will be called + with a list of event_ids that are needed, and should return with + a Deferred of dict of event_id to event. + + Returns + Deferred[dict[(str, str), str]]: + a map from (type, state_key) to event_id. + """ + if room_version in (RoomVersions.V1, RoomVersions.VDH_TEST,): + return v1.resolve_events_with_factory( + state_sets, event_map, state_map_factory, + ) + else: + # This should only happen if we added a version but forgot to add it to + # the list above. + raise Exception( + "No state resolution algorithm defined for version %r" % (room_version,) + ) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index ce32e8fefd..dc68d365c2 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -705,9 +705,11 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore } events_map = {ev.event_id: ev for ev, _ in events_context} + room_version = yield self.get_room_version(room_id) + logger.debug("calling resolve_state_groups from preserve_events") res = yield self._state_resolution_handler.resolve_state_groups( - room_id, state_groups, events_map, get_events + room_id, room_version, state_groups, events_map, get_events ) defer.returnValue((res.state, None)) -- cgit 1.5.1 From bb99b1f5507e009bc32a82ba6ef29ba2d91e6b65 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 9 Aug 2018 14:44:10 +0100 Subject: Add fast path in state res for zero prev events --- synapse/state/__init__.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 222daa0b28..f1c26d1071 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -361,7 +361,12 @@ class StateHandler(object): room_id, event_ids ) - if len(state_groups_ids) == 1: + if len(state_groups_ids) == 0: + defer.returnValue(_StateCacheEntry( + state={}, + state_group=None, + )) + elif len(state_groups_ids) == 1: name, state_list = list(state_groups_ids.items()).pop() prev_group, delta_ids = yield self.store.get_state_group_delta(name) -- cgit 1.5.1 From 01021c812f86466a5f35db8afd443b9cb17bd092 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 9 Aug 2018 22:16:00 +0100 Subject: wip at implementing MSC 7075 --- .../resource_limits_server_notices.py | 84 ++++++++++++++++++++++ 1 file changed, 84 insertions(+) create mode 100644 synapse/server_notices/resource_limits_server_notices.py (limited to 'synapse') diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py new file mode 100644 index 0000000000..33b1d80cd9 --- /dev/null +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New 
Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging + +from six import iteritems, string_types + +from twisted.internet import defer + +from synapse.api.errors import SynapseError +from synapse.api.urls import ConsentURIBuilder +from synapse.config import ConfigError +from synapse.types import get_localpart_from_id + +logger = logging.getLogger(__name__) + + +class ResourceLimitsServerNotices(object): + """ + """ + def __init__(self, hs): + """ + + Args: + hs (synapse.server.HomeServer): + """ + self._server_notices_manager = hs.get_server_notices_manager() + self._store = hs.get_datastore() + self._api = hs.get_api() + self._server_notice_content = hs.config.user_consent_server_notice_content + self._limit_usage_by_mau = config.limit_usage_by_mau = False + self._hs_disabled.config.hs_disabled = False + + self._notified = set() + self._resouce_limited = False + # Config checks? + + @defer.inlineCallbacks + def maybe_send_server_notice_to_user(self, user_id): + """Check if we need to send a notice to this user, and does so if so + + Args: + user_id (str): user to check + + Returns: + Deferred + """ + if self._limit_usage_by_mau is False and self._hs_disabled is False: + # not enabled + return + + timestamp = yield self.store.user_last_seen_monthly_active(user_id) + if timestamp is None: + # This user will be blocked from receiving the notice anyway + return + try: + yield self.api.check_auth_blocking() + if self._resouce_limited: + # Need to start removing notices + pass + except AuthError as e: + # Need to start notifying of blocking + if not self._resouce_limited: + pass + + # need to send a message. + try: + yield self._server_notices_manager.send_notice( + user_id, content, + ) + + except SynapseError as e: + logger.error("Error sending server notice about resource limits: %s", e) -- cgit 1.5.1 From 6c6aba76e1059f103ed48487e74ab4161347638b Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Fri, 10 Aug 2018 15:12:59 +0100 Subject: implementation of server notices to alert on hitting resource limits --- .../resource_limits_server_notices.py | 77 ++++++++----- synapse/server_notices/server_notices_sender.py | 33 +++--- .../test_resource_limits_server_notices.py | 125 +++++++++++++++++++++ 3 files changed, 190 insertions(+), 45 deletions(-) create mode 100644 tests/server_notices/test_resource_limits_server_notices.py (limited to 'synapse') diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 33b1d80cd9..017828f81e 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -14,14 +14,9 @@ # limitations under the License. 
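A detail worth flagging before the revised __init__ below: the first
draft's "config.limit_usage_by_mau" raises NameError (there is no
"config" in scope), and even the revised chained assignment
"self._limit_usage_by_mau = hs.config.limit_usage_by_mau = False"
forces the flag off by writing False into both names. Presumably a
plain read was intended, roughly:

    def __init__(self, hs):
        ...
        # Hypothetical corrected reads (no trailing "= False").
        self._limit_usage_by_mau = hs.config.limit_usage_by_mau
        self._hs_disabled = hs.config.hs_disabled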
import logging -from six import iteritems, string_types - from twisted.internet import defer -from synapse.api.errors import SynapseError -from synapse.api.urls import ConsentURIBuilder -from synapse.config import ConfigError -from synapse.types import get_localpart_from_id +from synapse.api.errors import AuthError, SynapseError logger = logging.getLogger(__name__) @@ -37,12 +32,12 @@ class ResourceLimitsServerNotices(object): """ self._server_notices_manager = hs.get_server_notices_manager() self._store = hs.get_datastore() - self._api = hs.get_api() + self.auth = hs.get_auth() self._server_notice_content = hs.config.user_consent_server_notice_content - self._limit_usage_by_mau = config.limit_usage_by_mau = False - self._hs_disabled.config.hs_disabled = False + self._limit_usage_by_mau = hs.config.limit_usage_by_mau = False + self._hs_disabled = hs.config.hs_disabled - self._notified = set() + self._notified_of_blocking = set() self._resouce_limited = False # Config checks? @@ -56,29 +51,49 @@ class ResourceLimitsServerNotices(object): Returns: Deferred """ - if self._limit_usage_by_mau is False and self._hs_disabled is False: - # not enabled + if self._hs_disabled is True: return - timestamp = yield self.store.user_last_seen_monthly_active(user_id) - if timestamp is None: - # This user will be blocked from receiving the notice anyway - return - try: - yield self.api.check_auth_blocking() - if self._resouce_limited: + if self._limit_usage_by_mau is True: + timestamp = yield self._store.user_last_seen_monthly_active(user_id) + if timestamp is None: + # This user will be blocked from receiving the notice anyway. + # In practice, not sure we can ever get here + return + try: + yield self.auth.check_auth_blocking() + self._resouce_limited = False # Need to start removing notices - pass - except AuthError as e: - # Need to start notifying of blocking - if not self._resouce_limited: - pass + if user_id in self._notified_of_blocking: + # Send message to remove warning - needs updating + content = "remove warning" + self._send_server_notice(user_id, content) + self._notified_of_blocking.remove(user_id) - # need to send a message. - try: - yield self._server_notices_manager.send_notice( - user_id, content, - ) + except AuthError: + # Need to start notifying of blocking + + self._resouce_limited = True + if user_id not in self._notified_of_blocking: + # Send message to add warning - needs updating + content = "add warning" + self._send_server_notice(user_id, content) + self._notified_of_blocking.add(user_id) - except SynapseError as e: - logger.error("Error sending server notice about resource limits: %s", e) + @defer.inlineCallbacks + def _send_server_notice(self, user_id, content): + """Sends Server notice + + Args: + user_id(str): The user to send to + content(str): The content of the message + + Returns: + Deferred[] + """ + try: + yield self._server_notices_manager.send_notice( + user_id, content, + ) + except SynapseError as e: + logger.error("Error sending server notice about resource limits: %s", e) diff --git a/synapse/server_notices/server_notices_sender.py b/synapse/server_notices/server_notices_sender.py index 5d23965f34..6121b2f267 100644 --- a/synapse/server_notices/server_notices_sender.py +++ b/synapse/server_notices/server_notices_sender.py @@ -12,7 +12,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from twisted.internet import defer + from synapse.server_notices.consent_server_notices import ConsentServerNotices +from synapse.server_notices.resource_limits_server_notices import ( + ResourceLimitsServerNotices, +) class ServerNoticesSender(object): @@ -25,34 +30,34 @@ class ServerNoticesSender(object): Args: hs (synapse.server.HomeServer): """ - # todo: it would be nice to make this more dynamic - self._consent_server_notices = ConsentServerNotices(hs) + self._server_notices = ( + ConsentServerNotices(hs), + ResourceLimitsServerNotices(hs) + ) + @defer.inlineCallbacks def on_user_syncing(self, user_id): """Called when the user performs a sync operation. Args: user_id (str): mxid of user who synced - - Returns: - Deferred """ - return self._consent_server_notices.maybe_send_server_notice_to_user( - user_id, - ) + for sn in self._server_notices: + yield sn.maybe_send_server_notice_to_user( + user_id, + ) + @defer.inlineCallbacks def on_user_ip(self, user_id): """Called on the master when a worker process saw a client request. Args: user_id (str): mxid - - Returns: - Deferred """ # The synchrotrons use a stubbed version of ServerNoticesSender, so # we check for notices to send to the user in on_user_ip as well as # in on_user_syncing - return self._consent_server_notices.maybe_send_server_notice_to_user( - user_id, - ) + for sn in self._server_notices: + yield sn.maybe_send_server_notice_to_user( + user_id, + ) diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py new file mode 100644 index 0000000000..a69253f1b6 --- /dev/null +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -0,0 +1,125 @@ +from mock import Mock + +from twisted.internet import defer + +from synapse.api.errors import AuthError +from synapse.handlers.auth import AuthHandler +from synapse.server_notices.resource_limits_server_notices import ( + ResourceLimitsServerNotices, +) + +from tests import unittest +from tests.utils import setup_test_homeserver + + +class AuthHandlers(object): + def __init__(self, hs): + self.auth_handler = AuthHandler(hs) + + +class TestResourceLimitsServerNotices(unittest.TestCase): + @defer.inlineCallbacks + def setUp(self): + self.hs = yield setup_test_homeserver(handlers=None) + self.hs.handlers = AuthHandlers(self.hs) + self.auth_handler = self.hs.handlers.auth_handler + self.server_notices_sender = self.hs.get_server_notices_sender() + + # relying on [1] is far from ideal, but the only case where + # ResourceLimitsServerNotices class needs to be isolated is this test, + # general code should never have a reason to do so ... 
+ self._rlsn = self.server_notices_sender._server_notices[1] + if not isinstance(self._rlsn, ResourceLimitsServerNotices): + raise Exception("Failed to find reference to ResourceLimitsServerNotices") + + self._rlsn._store.user_last_seen_monthly_active = Mock( + return_value=defer.succeed(1000) + ) + self._send_notice = self._rlsn._server_notices_manager.send_notice + self._rlsn._server_notices_manager.send_notice = Mock() + self._send_notice = self._rlsn._server_notices_manager.send_notice + + self._rlsn._limit_usage_by_mau = True + self.user_id = "user_id" + + @defer.inlineCallbacks + def test_maybe_send_server_notice_to_user_flag_off(self): + """Tests cases where the flags indicate nothing to do""" + # test hs disabled case + self._hs_disabled = True + + yield self._rlsn.maybe_send_server_notice_to_user("user_id") + + self._send_notice.assert_not_called() + # Test when mau limiting disabled + self._hs_disabled = False + self._rlsn._limit_usage_by_mau = False + yield self._rlsn.maybe_send_server_notice_to_user("user_id") + + self._send_notice.assert_not_called() + + @defer.inlineCallbacks + def test_maybe_send_server_notice_to_user_remove_blocked_notice(self): + """Test when user has blocked notice, but should have it removed""" + + self._rlsn._notified_of_blocking.add(self.user_id) + self._rlsn.auth.check_auth_blocking = Mock() + + yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) + # "remove warning" obviously aweful, but test will start failing when code + # actually sends a real event, and then it can be updated + + self._send_notice.assert_called_once_with(self.user_id, "remove warning") + self.assertFalse(self.user_id in self._rlsn._notified_of_blocking) + + @defer.inlineCallbacks + def test_maybe_send_server_notice_to_user_remove_blocked_notice_noop(self): + """Test when user has blocked notice, but notice ought to be there (NOOP)""" + self._rlsn._notified_of_blocking.add(self.user_id) + self._rlsn.auth.check_auth_blocking = Mock( + side_effect=AuthError(403, 'foo') + ) + + yield self._rlsn.maybe_send_server_notice_to_user("user_id") + + self._send_notice.assert_not_called() + self.assertTrue(self.user_id in self._rlsn._notified_of_blocking) + + @defer.inlineCallbacks + def test_maybe_send_server_notice_to_user_add_blocked_notice(self): + """Test when user does not have blocked notice, but should have one""" + + self._rlsn.auth.check_auth_blocking = Mock(side_effect=AuthError(403, 'foo')) + yield self._rlsn.maybe_send_server_notice_to_user("user_id") + + # "add warning" obviously awful, but test will start failing when code + # actually sends a real event, and then it can be updated + self._send_notice.assert_called_once_with(self.user_id, "add warning") + self.assertTrue(self.user_id in self._rlsn._notified_of_blocking) + + @defer.inlineCallbacks + def test_maybe_send_server_notice_to_user_add_blocked_notice_noop(self): + """Test when user does not have blocked notice, nor should they (NOOP)""" + + self._rlsn.auth.check_auth_blocking = Mock() + + yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) + + self._send_notice.assert_not_called() + self.assertFalse(self.user_id in self._rlsn._notified_of_blocking) + + @defer.inlineCallbacks + def test_maybe_send_server_notice_to_user_not_in_mau_cohort(self): + + """Test when user is not part of the MAU cohort - this should not ever + happen - but ... 
+ """ + + self._rlsn.auth.check_auth_blocking = Mock() + self._rlsn._store.user_last_seen_monthly_active = Mock( + return_value=defer.succeed(None) + ) + yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) + + self._send_notice.assert_not_called() + self.assertFalse(self.user_id in self._rlsn._notified_of_blocking) -- cgit 1.5.1 From 53ace904b2b8e2444bc518b2498947c869c8c13b Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 5 Dec 2017 01:29:25 +0000 Subject: total WIP skeleton for /room_keys API --- synapse/handlers/e2e_room_keys.py | 60 ++++++++++ synapse/rest/client/v2_alpha/room_keys.py | 56 +++++++++ synapse/storage/e2e_room_keys.py | 133 ++++++++++++++++++++++ synapse/storage/schema/delta/46/e2e_room_keys.sql | 40 +++++++ 4 files changed, 289 insertions(+) create mode 100644 synapse/handlers/e2e_room_keys.py create mode 100644 synapse/rest/client/v2_alpha/room_keys.py create mode 100644 synapse/storage/e2e_room_keys.py create mode 100644 synapse/storage/schema/delta/46/e2e_room_keys.sql (limited to 'synapse') diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py new file mode 100644 index 0000000000..78c838a829 --- /dev/null +++ b/synapse/handlers/e2e_room_keys.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import ujson as json +import logging + +from canonicaljson import encode_canonical_json +from twisted.internet import defer + +from synapse.api.errors import SynapseError, CodeMessageException +from synapse.types import get_domain_from_id +from synapse.util.logcontext import preserve_fn, make_deferred_yieldable +from synapse.util.retryutils import NotRetryingDestination + +logger = logging.getLogger(__name__) + + +class E2eRoomKeysHandler(object): + def __init__(self, hs): + self.store = hs.get_datastore() + + @defer.inlineCallbacks + def get_room_keys(self, user_id, version, room_id, session_id): + results = yield self.store.get_e2e_room_keys(user_id, version, room_id, session_id) + defer.returnValue(results) + + @defer.inlineCallbacks + def upload_room_keys(self, user_id, version, room_keys): + + # TODO: Validate the JSON to make sure it has the right keys. 
+ + # go through the room_keys + for room_id in room_keys['rooms']: + for session_id in room_keys['rooms'][room_id]['sessions']: + session = room_keys['rooms'][room_id]['sessions'][session_id] + + # get a lock + + # get the room_key for this particular row + yield self.store.get_e2e_room_key() + + # check whether we merge or not + if() + + # if so, we set it + yield self.store.set_e2e_room_key() + + # release the lock diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py new file mode 100644 index 0000000000..9b93001919 --- /dev/null +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from twisted.internet import defer + +from synapse.api.errors import SynapseError +from synapse.http.servlet import ( + RestServlet, parse_json_object_from_request, parse_integer +) +from synapse.http.servlet import parse_string +from synapse.types import StreamToken +from ._base import client_v2_patterns + +logger = logging.getLogger(__name__) + + +class RoomKeysUploadServlet(RestServlet): + PATTERNS = client_v2_patterns("/room_keys/keys(/(?P[^/]+))?(/(?P[^/]+))?$") + + def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ + super(RoomKeysUploadServlet, self).__init__() + self.auth = hs.get_auth() + self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() + + @defer.inlineCallbacks + def on_POST(self, request, room_id, session_id): + requester = yield self.auth.get_user_by_req(request, allow_guest=True) + user_id = requester.user.to_string() + body = parse_json_object_from_request(request) + + result = yield self.e2e_room_keys_handler.upload_room_keys( + user_id, version, body + ) + defer.returnValue((200, result)) + + +def register_servlets(hs, http_server): + RoomKeysUploadServlet(hs).register(http_server) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py new file mode 100644 index 0000000000..9f6d47e1b6 --- /dev/null +++ b/synapse/storage/e2e_room_keys.py @@ -0,0 +1,133 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from twisted.internet import defer + +from synapse.util.caches.descriptors import cached + +from canonicaljson import encode_canonical_json +import ujson as json + +from ._base import SQLBaseStore + + +class EndToEndRoomKeyStore(SQLBaseStore): + + @defer.inlineCallbacks + def get_e2e_room_key(self, user_id, version, room_id, session_id): + + row = yield self._simple_select_one( + table="e2e_room_keys", + keyvalues={ + "user_id": user_id, + "version": version, + "room_id": room_id, + "session_id": session_id, + }, + retcols=( + "first_message_index", + "forwarded_count", + "is_verified", + "session_data", + ), + desc="get_e2e_room_key", + ) + + defer.returnValue(row); + + def set_e2e_room_key(self, user_id, version, room_id, session_id, room_key): + + def _set_e2e_room_key_txn(txn): + + self._simple_upsert( + txn, + table="e2e_room_keys", + keyvalues={ + "user_id": user_id, + "room_id": room_id, + "session_id": session_id, + } + values=[ + { + "version": version, + "first_message_index": room_key['first_message_index'], + "forwarded_count": room_key['forwarded_count'], + "is_verified": room_key['is_verified'], + "session_data": room_key['session_data'], + } + ], + lock=False, + ) + + return True + + return self.runInteraction( + "set_e2e_room_key", _set_e2e_room_key_txn + ) + + + def set_e2e_room_keys(self, user_id, version, room_keys): + + def _set_e2e_room_keys_txn(txn): + + self._simple_insert_many_txn( + txn, + table="e2e_room_keys", + values=[ + { + "user_id": user_id, + "room_id": room_id, + "session_id": session_id, + "version": version, + "first_message_index": room_keys['rooms'][room_id]['sessions'][session_id]['first_message_index'], + "forwarded_count": room_keys['rooms'][room_id]['sessions'][session_id]['forwarded_count'], + "is_verified": room_keys['rooms'][room_id]['sessions'][session_id]['is_verified'], + "session_data": room_keys['rooms'][room_id]['sessions'][session_id]['session_data'], + } + for session_id in room_keys['rooms'][room_id]['sessions'] + for room_id in room_keys['rooms'] + ] + ) + + return True + + return self.runInteraction( + "set_e2e_room_keys", _set_e2e_room_keys_txn + ) + + @defer.inlineCallbacks + def get_e2e_room_keys(self, user_id, version, room_id, session_id): + + keyvalues={ + "user_id": user_id, + "version": version, + } + if room_id: keyvalues['room_id'] = room_id + if session_id: keyvalues['session_id'] = session_id + + rows = yield self._simple_select_list( + table="e2e_room_keys", + keyvalues=keyvalues, + retcols=( + "first_message_index", + "forwarded_count", + "is_verified", + "session_data", + ), + desc="get_e2e_room_keys", + ) + + sessions = {} + sessions['rooms'][roomId]['sessions'][session_id] = row for row in rows; + defer.returnValue(sessions); diff --git a/synapse/storage/schema/delta/46/e2e_room_keys.sql b/synapse/storage/schema/delta/46/e2e_room_keys.sql new file mode 100644 index 0000000000..51b826e8b3 --- /dev/null +++ b/synapse/storage/schema/delta/46/e2e_room_keys.sql @@ -0,0 +1,40 @@ +/* Copyright 2017 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- users' optionally backed up encrypted e2e sessions +CREATE TABLE e2e_room_keys ( + user_id TEXT NOT NULL, + room_id TEXT NOT NULL, + session_id TEXT NOT NULL, + version INT NOT NULL, + first_message_index INT, + forwarded_count INT, + is_verified BOOLEAN, + session_data TEXT NOT NULL +); + +CREATE UNIQUE INDEX e2e_room_keys_user_idx ON e2e_room_keys(user_id); +CREATE UNIQUE INDEX e2e_room_keys_room_idx ON e2e_room_keys(room_id); +CREATE UNIQUE INDEX e2e_room_keys_session_idx ON e2e_room_keys(session_id); + +-- the versioning metadata about versions of users' encrypted e2e session backups +CREATE TABLE e2e_room_key_versions ( + user_id TEXT NOT NULL, + version INT NOT NULL, + algorithm TEXT NOT NULL, + dummy_session_data TEXT NOT NULL +); + +CREATE UNIQUE INDEX e2e_room_key_user_idx ON e2e_room_keys(user_id); -- cgit 1.5.1 From 0bc4627a731d0edd437905a5b07e85421c7553d8 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 5 Dec 2017 17:54:48 +0000 Subject: interim WIP checkin; doesn't build yet --- synapse/handlers/e2e_room_keys.py | 46 +++++++++++++++++++++---------- synapse/rest/client/v2_alpha/room_keys.py | 37 ++++++++++++++++++++++--- synapse/storage/e2e_room_keys.py | 20 ++++++++++++++ 3 files changed, 84 insertions(+), 19 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 78c838a829..15e3beb5ed 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -20,8 +20,7 @@ from canonicaljson import encode_canonical_json from twisted.internet import defer from synapse.api.errors import SynapseError, CodeMessageException -from synapse.types import get_domain_from_id -from synapse.util.logcontext import preserve_fn, make_deferred_yieldable +from synapse.util.async import Linearizer from synapse.util.retryutils import NotRetryingDestination logger = logging.getLogger(__name__) @@ -30,31 +29,48 @@ logger = logging.getLogger(__name__) class E2eRoomKeysHandler(object): def __init__(self, hs): self.store = hs.get_datastore() + self._upload_linearizer = async.Linearizer("upload_room_keys_lock") @defer.inlineCallbacks def get_room_keys(self, user_id, version, room_id, session_id): results = yield self.store.get_e2e_room_keys(user_id, version, room_id, session_id) defer.returnValue(results) + @defer.inlineCallbacks + def delete_room_keys(self, user_id, version, room_id, session_id): + yield self.store.delete_e2e_room_keys(user_id, version, room_id, session_id) + @defer.inlineCallbacks def upload_room_keys(self, user_id, version, room_keys): # TODO: Validate the JSON to make sure it has the right keys. - # go through the room_keys - for room_id in room_keys['rooms']: - for session_id in room_keys['rooms'][room_id]['sessions']: - session = room_keys['rooms'][room_id]['sessions'][session_id] - - # get a lock + # XXX: perhaps we should use a finer grained lock here? 
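As that XXX comment suggests, one finer-grained option would be to
queue on a (user, room) pair so uploads for different rooms need not
serialise against each other; a hypothetical sketch follows (note also
that the "with (yield ..." line below is missing its closing
parenthesis, which a later commit fixes):

    for room_id in room_keys['rooms']:
        with (yield self._upload_linearizer.queue((user_id, room_id))):
            sessions = room_keys['rooms'][room_id]['sessions']
            # ... merge each session as in the loop below ...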
+ with (yield self._upload_linearizer.queue(user_id): - # get the room_key for this particular row - yield self.store.get_e2e_room_key() + # go through the room_keys + for room_id in room_keys['rooms']: + for session_id in room_keys['rooms'][room_id]['sessions']: + room_key = room_keys['rooms'][room_id]['sessions'][session_id] - # check whether we merge or not - if() + # get the room_key for this particular row + current_room_key = yield self.store.get_e2e_room_key( + user_id, version, room_id, session_id + ) - # if so, we set it - yield self.store.set_e2e_room_key() + # check whether we merge or not. spelling it out with if/elifs rather than + # lots of booleans for legibility. + replace = False + if current_room_key: + if room_key['is_verified'] and not current_room_key['is_verified']: + replace = True + elif room_key['first_message_index'] < current_room_key['first_message_index']: + replace = True + elif room_key['forwarded_count'] < room_key['forwarded_count']: + replace = True - # release the lock + # if so, we set the new room_key + if replace: + yield self.store.set_e2e_room_key( + user_id, version, room_id, session_id, room_key + ) diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 9b93001919..7291018a48 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -28,7 +28,7 @@ from ._base import client_v2_patterns logger = logging.getLogger(__name__) -class RoomKeysUploadServlet(RestServlet): +class RoomKeysServlet(RestServlet): PATTERNS = client_v2_patterns("/room_keys/keys(/(?P[^/]+))?(/(?P[^/]+))?$") def __init__(self, hs): @@ -41,16 +41,45 @@ class RoomKeysUploadServlet(RestServlet): self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() @defer.inlineCallbacks - def on_POST(self, request, room_id, session_id): - requester = yield self.auth.get_user_by_req(request, allow_guest=True) + def on_PUT(self, request, room_id, session_id): + requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() body = parse_json_object_from_request(request) + version = request.args.get("version", None) + + if session_id: + body = { "sessions": { session_id : body } } + + if room_id: + body = { "rooms": { room_id : body } } result = yield self.e2e_room_keys_handler.upload_room_keys( user_id, version, body ) defer.returnValue((200, result)) + @defer.inlineCallbacks + def on_GET(self, request, room_id, session_id): + requester = yield self.auth.get_user_by_req(request, allow_guest=False) + user_id = requester.user.to_string() + version = request.args.get("version", None) + + room_keys = yield self.e2e_room_keys_handler.get_room_keys( + user_id, version, room_id, session_id + ) + defer.returnValue((200, room_keys)) + + @defer.inlineCallbacks + def on_DELETE(self, request, room_id, session_id): + requester = yield self.auth.get_user_by_req(request, allow_guest=False) + user_id = requester.user.to_string() + version = request.args.get("version", None) + + yield self.e2e_room_keys_handler.delete_room_keys( + user_id, version, room_id, session_id + ) + defer.returnValue((200, {})) + def register_servlets(hs, http_server): - RoomKeysUploadServlet(hs).register(http_server) + RoomKeysServlet(hs).register(http_server) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 9f6d47e1b6..903dc083f8 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + from twisted.internet import defer from synapse.util.caches.descriptors import cached @@ -77,6 +78,9 @@ class EndToEndRoomKeyStore(SQLBaseStore): ) + # XXX: this isn't currently used and isn't tested anywhere + # it could be used in future for bulk-uploading new versions of room_keys + # for a user or something though. def set_e2e_room_keys(self, user_id, version, room_keys): def _set_e2e_room_keys_txn(txn): @@ -131,3 +135,19 @@ class EndToEndRoomKeyStore(SQLBaseStore): sessions = {} sessions['rooms'][roomId]['sessions'][session_id] = row for row in rows; defer.returnValue(sessions); + + @defer.inlineCallbacks + def delete_e2e_room_keys(self, user_id, version, room_id, session_id): + + keyvalues={ + "user_id": user_id, + "version": version, + } + if room_id: keyvalues['room_id'] = room_id + if session_id: keyvalues['session_id'] = session_id + + yield self._simple_delete( + table="e2e_room_keys", + keyvalues=keyvalues, + desc="delete_e2e_room_keys", + ) -- cgit 1.5.1 From 6b8c07abc293bd222051e769550753bc0fd6f667 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 5 Dec 2017 21:44:25 +0000 Subject: make it work and fix pep8 --- synapse/handlers/e2e_room_keys.py | 69 +++++++++------ synapse/rest/__init__.py | 2 + synapse/rest/client/v2_alpha/room_keys.py | 33 ++++--- synapse/server.py | 5 ++ synapse/storage/__init__.py | 2 + synapse/storage/e2e_room_keys.py | 103 +++++++++++++--------- synapse/storage/schema/delta/46/e2e_room_keys.sql | 2 +- 7 files changed, 134 insertions(+), 82 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 15e3beb5ed..93f4ad5194 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -13,15 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -import ujson as json import logging -from canonicaljson import encode_canonical_json from twisted.internet import defer -from synapse.api.errors import SynapseError, CodeMessageException +from synapse.api.errors import StoreError from synapse.util.async import Linearizer -from synapse.util.retryutils import NotRetryingDestination logger = logging.getLogger(__name__) @@ -29,11 +26,13 @@ logger = logging.getLogger(__name__) class E2eRoomKeysHandler(object): def __init__(self, hs): self.store = hs.get_datastore() - self._upload_linearizer = async.Linearizer("upload_room_keys_lock") + self._upload_linearizer = Linearizer("upload_room_keys_lock") @defer.inlineCallbacks def get_room_keys(self, user_id, version, room_id, session_id): - results = yield self.store.get_e2e_room_keys(user_id, version, room_id, session_id) + results = yield self.store.get_e2e_room_keys( + user_id, version, room_id, session_id + ) defer.returnValue(results) @defer.inlineCallbacks @@ -46,31 +45,49 @@ class E2eRoomKeysHandler(object): # TODO: Validate the JSON to make sure it has the right keys. # XXX: perhaps we should use a finer grained lock here? 
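One wrinkle in the merge rules introduced below: the forwarded_count
branch compares room_key['forwarded_count'] against itself, which can
never be true. Presumably the stored key was meant, as in this
hypothetical restatement:

    def _should_replace_key(current_room_key, room_key):
        # Hypothetical restatement of the intended merge rule; the
        # diff below compares room_key['forwarded_count'] with itself.
        if room_key['is_verified'] and not current_room_key['is_verified']:
            return True
        if room_key['first_message_index'] < current_room_key['first_message_index']:
            return True
        if room_key['forwarded_count'] < current_room_key['forwarded_count']:
            return True
        return False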
- with (yield self._upload_linearizer.queue(user_id): + with (yield self._upload_linearizer.queue(user_id)): # go through the room_keys for room_id in room_keys['rooms']: for session_id in room_keys['rooms'][room_id]['sessions']: room_key = room_keys['rooms'][room_id]['sessions'][session_id] - # get the room_key for this particular row - current_room_key = yield self.store.get_e2e_room_key( - user_id, version, room_id, session_id + yield self._upload_room_key( + user_id, version, room_id, session_id, room_key ) - # check whether we merge or not. spelling it out with if/elifs rather than - # lots of booleans for legibility. - replace = False - if current_room_key: - if room_key['is_verified'] and not current_room_key['is_verified']: - replace = True - elif room_key['first_message_index'] < current_room_key['first_message_index']: - replace = True - elif room_key['forwarded_count'] < room_key['forwarded_count']: - replace = True - - # if so, we set the new room_key - if replace: - yield self.store.set_e2e_room_key( - user_id, version, room_id, session_id, room_key - ) + @defer.inlineCallbacks + def _upload_room_key(self, user_id, version, room_id, session_id, room_key): + # get the room_key for this particular row + current_room_key = None + try: + current_room_key = yield self.store.get_e2e_room_key( + user_id, version, room_id, session_id + ) + except StoreError as e: + if e.code == 404: + pass + else: + raise + + # check whether we merge or not. spelling it out with if/elifs rather + # than lots of booleans for legibility. + upsert = True + if current_room_key: + if room_key['is_verified'] and not current_room_key['is_verified']: + pass + elif ( + room_key['first_message_index'] < + current_room_key['first_message_index'] + ): + pass + elif room_key['forwarded_count'] < room_key['forwarded_count']: + pass + else: + upsert = False + + # if so, we set the new room_key + if upsert: + yield self.store.set_e2e_room_key( + user_id, version, room_id, session_id, room_key + ) diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 3418f06fd6..4856822a5d 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -46,6 +46,7 @@ from synapse.rest.client.v2_alpha import ( receipts, register, report_event, + room_keys, sendtodevice, sync, tags, @@ -102,6 +103,7 @@ class ClientRestResource(JsonResource): auth.register_servlets(hs, client_resource) receipts.register_servlets(hs, client_resource) read_marker.register_servlets(hs, client_resource) + room_keys.register_servlets(hs, client_resource) keys.register_servlets(hs, client_resource) tokenrefresh.register_servlets(hs, client_resource) tags.register_servlets(hs, client_resource) diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 7291018a48..010aed98f9 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -17,26 +17,25 @@ import logging from twisted.internet import defer -from synapse.api.errors import SynapseError from synapse.http.servlet import ( - RestServlet, parse_json_object_from_request, parse_integer + RestServlet, parse_json_object_from_request ) -from synapse.http.servlet import parse_string -from synapse.types import StreamToken from ._base import client_v2_patterns logger = logging.getLogger(__name__) class RoomKeysServlet(RestServlet): - PATTERNS = client_v2_patterns("/room_keys/keys(/(?P[^/]+))?(/(?P[^/]+))?$") + PATTERNS = client_v2_patterns( + "/room_keys/keys(/(?P[^/]+))?(/(?P[^/]+))?$" + ) def 
__init__(self, hs): """ Args: hs (synapse.server.HomeServer): server """ - super(RoomKeysUploadServlet, self).__init__() + super(RoomKeysServlet, self).__init__() self.auth = hs.get_auth() self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() @@ -45,24 +44,32 @@ class RoomKeysServlet(RestServlet): requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() body = parse_json_object_from_request(request) - version = request.args.get("version", None) + version = request.args.get("version")[0] if session_id: - body = { "sessions": { session_id : body } } + body = { + "sessions": { + session_id: body + } + } if room_id: - body = { "rooms": { room_id : body } } + body = { + "rooms": { + room_id: body + } + } - result = yield self.e2e_room_keys_handler.upload_room_keys( + yield self.e2e_room_keys_handler.upload_room_keys( user_id, version, body ) - defer.returnValue((200, result)) + defer.returnValue((200, {})) @defer.inlineCallbacks def on_GET(self, request, room_id, session_id): requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() - version = request.args.get("version", None) + version = request.args.get("version")[0] room_keys = yield self.e2e_room_keys_handler.get_room_keys( user_id, version, room_id, session_id @@ -73,7 +80,7 @@ class RoomKeysServlet(RestServlet): def on_DELETE(self, request, room_id, session_id): requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() - version = request.args.get("version", None) + version = request.args.get("version")[0] yield self.e2e_room_keys_handler.delete_room_keys( user_id, version, room_id, session_id diff --git a/synapse/server.py b/synapse/server.py index 140be9ebe8..706cb1361f 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -49,6 +49,7 @@ from synapse.handlers.deactivate_account import DeactivateAccountHandler from synapse.handlers.device import DeviceHandler from synapse.handlers.devicemessage import DeviceMessageHandler from synapse.handlers.e2e_keys import E2eKeysHandler +from synapse.handlers.e2e_room_keys import E2eRoomKeysHandler from synapse.handlers.events import EventHandler, EventStreamHandler from synapse.handlers.groups_local import GroupsLocalHandler from synapse.handlers.initial_sync import InitialSyncHandler @@ -127,6 +128,7 @@ class HomeServer(object): 'auth_handler', 'device_handler', 'e2e_keys_handler', + 'e2e_room_keys_handler', 'event_handler', 'event_stream_handler', 'initial_sync_handler', @@ -288,6 +290,9 @@ class HomeServer(object): def build_e2e_keys_handler(self): return E2eKeysHandler(self) + def build_e2e_room_keys_handler(self): + return E2eRoomKeysHandler(self) + def build_application_service_api(self): return ApplicationServiceApi(self) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 134e4a80f1..69cb28268a 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -30,6 +30,7 @@ from .appservice import ApplicationServiceStore, ApplicationServiceTransactionSt from .client_ips import ClientIpStore from .deviceinbox import DeviceInboxStore from .directory import DirectoryStore +from .e2e_room_keys import EndToEndRoomKeyStore from .end_to_end_keys import EndToEndKeyStore from .engines import PostgresEngine from .event_federation import EventFederationStore @@ -76,6 +77,7 @@ class DataStore(RoomMemberStore, RoomStore, ApplicationServiceTransactionStore, ReceiptsStore, EndToEndKeyStore, + 
EndToEndRoomKeyStore, SearchStore, TagsStore, AccountDataStore, diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 903dc083f8..5982710bd5 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -15,11 +15,6 @@ from twisted.internet import defer -from synapse.util.caches.descriptors import cached - -from canonicaljson import encode_canonical_json -import ujson as json - from ._base import SQLBaseStore @@ -45,29 +40,27 @@ class EndToEndRoomKeyStore(SQLBaseStore): desc="get_e2e_room_key", ) - defer.returnValue(row); + defer.returnValue(row) def set_e2e_room_key(self, user_id, version, room_id, session_id, room_key): def _set_e2e_room_key_txn(txn): - self._simple_upsert( + self._simple_upsert_txn( txn, table="e2e_room_keys", keyvalues={ "user_id": user_id, "room_id": room_id, - "session_id": session_id, - } - values=[ - { - "version": version, - "first_message_index": room_key['first_message_index'], - "forwarded_count": room_key['forwarded_count'], - "is_verified": room_key['is_verified'], - "session_data": room_key['session_data'], - } - ], + "session_id": session_id, + }, + values={ + "version": version, + "first_message_index": room_key['first_message_index'], + "forwarded_count": room_key['forwarded_count'], + "is_verified": room_key['is_verified'], + "session_data": room_key['session_data'], + }, lock=False, ) @@ -77,7 +70,6 @@ class EndToEndRoomKeyStore(SQLBaseStore): "set_e2e_room_key", _set_e2e_room_key_txn ) - # XXX: this isn't currently used and isn't tested anywhere # it could be used in future for bulk-uploading new versions of room_keys # for a user or something though. @@ -85,23 +77,27 @@ class EndToEndRoomKeyStore(SQLBaseStore): def _set_e2e_room_keys_txn(txn): + values = [] + for room_id in room_keys['rooms']: + for session_id in room_keys['rooms'][room_id]['sessions']: + session = room_keys['rooms'][room_id]['sessions'][session_id] + values.append( + { + "user_id": user_id, + "room_id": room_id, + "session_id": session_id, + "version": version, + "first_message_index": session['first_message_index'], + "forwarded_count": session['forwarded_count'], + "is_verified": session['is_verified'], + "session_data": session['session_data'], + } + ) + self._simple_insert_many_txn( txn, table="e2e_room_keys", - values=[ - { - "user_id": user_id, - "room_id": room_id, - "session_id": session_id, - "version": version, - "first_message_index": room_keys['rooms'][room_id]['sessions'][session_id]['first_message_index'], - "forwarded_count": room_keys['rooms'][room_id]['sessions'][session_id]['forwarded_count'], - "is_verified": room_keys['rooms'][room_id]['sessions'][session_id]['is_verified'], - "session_data": room_keys['rooms'][room_id]['sessions'][session_id]['session_data'], - } - for session_id in room_keys['rooms'][room_id]['sessions'] - for room_id in room_keys['rooms'] - ] + values=values ) return True @@ -113,17 +109,22 @@ class EndToEndRoomKeyStore(SQLBaseStore): @defer.inlineCallbacks def get_e2e_room_keys(self, user_id, version, room_id, session_id): - keyvalues={ + keyvalues = { "user_id": user_id, "version": version, } - if room_id: keyvalues['room_id'] = room_id - if session_id: keyvalues['session_id'] = session_id + if room_id: + keyvalues['room_id'] = room_id + if session_id: + keyvalues['session_id'] = session_id rows = yield self._simple_select_list( table="e2e_room_keys", keyvalues=keyvalues, retcols=( + "user_id", + "room_id", + "session_id", "first_message_index", "forwarded_count", "is_verified", @@ 
-132,19 +133,37 @@ class EndToEndRoomKeyStore(SQLBaseStore): desc="get_e2e_room_keys", ) - sessions = {} - sessions['rooms'][roomId]['sessions'][session_id] = row for row in rows; - defer.returnValue(sessions); + # perlesque autovivification from https://stackoverflow.com/a/19829714/6764493 + class AutoVivification(dict): + def __getitem__(self, item): + try: + return dict.__getitem__(self, item) + except KeyError: + value = self[item] = type(self)() + return value + + sessions = AutoVivification() + for row in rows: + sessions['rooms'][row['room_id']]['sessions'][row['session_id']] = { + "first_message_index": row["first_message_index"], + "forwarded_count": row["forwarded_count"], + "is_verified": row["is_verified"], + "session_data": row["session_data"], + } + + defer.returnValue(sessions) @defer.inlineCallbacks def delete_e2e_room_keys(self, user_id, version, room_id, session_id): - keyvalues={ + keyvalues = { "user_id": user_id, "version": version, } - if room_id: keyvalues['room_id'] = room_id - if session_id: keyvalues['session_id'] = session_id + if room_id: + keyvalues['room_id'] = room_id + if session_id: + keyvalues['session_id'] = session_id yield self._simple_delete( table="e2e_room_keys", diff --git a/synapse/storage/schema/delta/46/e2e_room_keys.sql b/synapse/storage/schema/delta/46/e2e_room_keys.sql index 51b826e8b3..6b344c5ad7 100644 --- a/synapse/storage/schema/delta/46/e2e_room_keys.sql +++ b/synapse/storage/schema/delta/46/e2e_room_keys.sql @@ -29,7 +29,7 @@ CREATE UNIQUE INDEX e2e_room_keys_user_idx ON e2e_room_keys(user_id); CREATE UNIQUE INDEX e2e_room_keys_room_idx ON e2e_room_keys(room_id); CREATE UNIQUE INDEX e2e_room_keys_session_idx ON e2e_room_keys(session_id); --- the versioning metadata about versions of users' encrypted e2e session backups +-- the metadata for each generation of encrypted e2e session backups CREATE TABLE e2e_room_key_versions ( user_id TEXT NOT NULL, version INT NOT NULL, -- cgit 1.5.1 From cf1e2000f623afea8f3afb58e4a7659288c45777 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 5 Dec 2017 23:06:43 +0000 Subject: document the API --- synapse/rest/client/v2_alpha/room_keys.py | 133 ++++++++++++++++++++++++++++++ 1 file changed, 133 insertions(+) (limited to 'synapse') diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 010aed98f9..be82eccb2b 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -41,6 +41,79 @@ class RoomKeysServlet(RestServlet): @defer.inlineCallbacks def on_PUT(self, request, room_id, session_id): + """ + Uploads one or more encrypted E2E room keys for backup purposes. + room_id: the ID of the room the keys are for (optional) + session_id: the ID for the E2E room keys for the room (optional) + version: the version of the user's backup which this data is for. + the version must already have been created via the /change_secret API. + + Each session has: + * first_message_index: a numeric index indicating the oldest message + encrypted by this session. + * forwarded_count: how many times the uploading client claims this key + has been shared (forwarded) + * is_verified: whether the client that uploaded the keys claims they + were sent by a device which they've verified + * session_data: base64-encrypted data describing the session. + + Returns 200 OK on success with body {} + + The API is designed to be otherwise agnostic to the room_key encryption + algorithm being used. 
Sessions are merged with existing ones in the + backup using the heuristics: + * is_verified sessions always win over unverified sessions + * older first_message_index always win over newer sessions + * lower forwarded_count always wins over higher forwarded_count + + We trust the clients not to lie and corrupt their own backups. + + POST /room_keys/keys/!abc:matrix.org/c0ff33?version=1 HTTP/1.1 + Content-Type: application/json + + { + "first_message_index": 1, + "forwarded_count": 1, + "is_verified": false, + "session_data": "SSBBTSBBIEZJU0gK" + } + + Or... + + POST /room_keys/keys/!abc:matrix.org?version=1 HTTP/1.1 + Content-Type: application/json + + { + "sessions": { + "c0ff33": { + "first_message_index": 1, + "forwarded_count": 1, + "is_verified": false, + "session_data": "SSBBTSBBIEZJU0gK" + } + } + } + + Or... + + POST /room_keys/keys?version=1 HTTP/1.1 + Content-Type: application/json + + { + "rooms": { + "!abc:matrix.org": { + "sessions": { + "c0ff33": { + "first_message_index": 1, + "forwarded_count": 1, + "is_verified": false, + "session_data": "SSBBTSBBIEZJU0gK" + } + } + } + } + } + """ requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() body = parse_json_object_from_request(request) @@ -67,6 +140,57 @@ class RoomKeysServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request, room_id, session_id): + """ + Retrieves one or more encrypted E2E room keys for backup purposes. + Symmetric with the PUT version of the API. + + room_id: the ID of the room to retrieve the keys for (optional) + session_id: the ID for the E2E room keys to retrieve the keys for (optional) + version: the version of the user's backup which this data is for. + the version must already have been created via the /change_secret API. + + Returns as follows: + + GET /room_keys/keys/!abc:matrix.org/c0ff33?version=1 HTTP/1.1 + { + "first_message_index": 1, + "forwarded_count": 1, + "is_verified": false, + "session_data": "SSBBTSBBIEZJU0gK" + } + + Or... + + GET /room_keys/keys/!abc:matrix.org?version=1 HTTP/1.1 + { + "sessions": { + "c0ff33": { + "first_message_index": 1, + "forwarded_count": 1, + "is_verified": false, + "session_data": "SSBBTSBBIEZJU0gK" + } + } + } + + Or... + + GET /room_keys/keys?version=1 HTTP/1.1 + { + "rooms": { + "!abc:matrix.org": { + "sessions": { + "c0ff33": { + "first_message_index": 1, + "forwarded_count": 1, + "is_verified": false, + "session_data": "SSBBTSBBIEZJU0gK" + } + } + } + } + } + """ requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() version = request.args.get("version")[0] @@ -78,6 +202,15 @@ class RoomKeysServlet(RestServlet): @defer.inlineCallbacks def on_DELETE(self, request, room_id, session_id): + """ + Deletes one or more encrypted E2E room keys for a user for backup purposes. + + room_id: the ID of the room whose keys to delete (optional) + session_id: the ID for the E2E session to delete (optional) + version: the version of the user's backup which this data is for. + the version must already have been created via the /change_secret API. 
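
        As an illustrative sketch (the room ID, session ID and version values
        here are placeholders, not values mandated by this API):

        DELETE /room_keys/keys/!abc:matrix.org/c0ff33?version=1 HTTP/1.1

        HTTP/1.1 200 OK
        {}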
+ """ + requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() version = request.args.get("version")[0] -- cgit 1.5.1 From 8ae64b270f7742abfe4cf0b8d140c81993464ea4 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 6 Dec 2017 01:02:57 +0000 Subject: implement /room_keys/version too (untested) --- synapse/api/errors.py | 25 ++++++++++ synapse/handlers/e2e_room_keys.py | 47 +++++++++++++++++-- synapse/rest/client/v2_alpha/room_keys.py | 47 +++++++++++++++++++ synapse/storage/e2e_room_keys.py | 56 +++++++++++++++++++++++ synapse/storage/schema/delta/46/e2e_room_keys.sql | 2 +- 5 files changed, 171 insertions(+), 6 deletions(-) (limited to 'synapse') diff --git a/synapse/api/errors.py b/synapse/api/errors.py index b41d595059..8c97e91ba1 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -56,6 +56,7 @@ class Codes(object): CONSENT_NOT_GIVEN = "M_CONSENT_NOT_GIVEN" CANNOT_LEAVE_SERVER_NOTICE_ROOM = "M_CANNOT_LEAVE_SERVER_NOTICE_ROOM" MAU_LIMIT_EXCEEDED = "M_MAU_LIMIT_EXCEEDED" + WRONG_ROOM_KEYS_VERSION = "M_WRONG_ROOM_KEYS_VERSION" class CodeMessageException(RuntimeError): @@ -285,6 +286,30 @@ class LimitExceededError(SynapseError): ) +class RoomKeysVersionError(SynapseError): + """A client has tried to upload to a non-current version of the room_keys store + """ + def __init__(self, code=403, msg="Wrong room_keys version", current_version=None, + errcode=Codes.WRONG_ROOM_KEYS_VERSION): + super(RoomKeysVersionError, self).__init__(code, msg, errcode) + self.current_version = current_version + + def error_dict(self): + return cs_error( + self.msg, + self.errcode, + current_version=self.current_version, + ) + + +def cs_exception(exception): + if isinstance(exception, CodeMessageException): + return exception.error_dict() + else: + logger.error("Unknown exception type: %s", type(exception)) + return {} + + def cs_error(msg, code=Codes.UNKNOWN, **kwargs): """ Utility method for constructing an error response for client-server interactions. diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 93f4ad5194..4333ca610c 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -17,7 +17,7 @@ import logging from twisted.internet import defer -from synapse.api.errors import StoreError +from synapse.api.errors import StoreError, SynapseError, RoomKeysVersionError from synapse.util.async import Linearizer logger = logging.getLogger(__name__) @@ -30,10 +30,13 @@ class E2eRoomKeysHandler(object): @defer.inlineCallbacks def get_room_keys(self, user_id, version, room_id, session_id): - results = yield self.store.get_e2e_room_keys( - user_id, version, room_id, session_id - ) - defer.returnValue(results) + # we deliberately take the lock to get keys so that changing the version + # works atomically + with (yield self._upload_linearizer.queue(user_id)): + results = yield self.store.get_e2e_room_keys( + user_id, version, room_id, session_id + ) + defer.returnValue(results) @defer.inlineCallbacks def delete_room_keys(self, user_id, version, room_id, session_id): @@ -44,6 +47,16 @@ class E2eRoomKeysHandler(object): # TODO: Validate the JSON to make sure it has the right keys. 
+            # Check that the version we're trying to upload is the current version
+            try:
+                version_info = yield self.get_version_info(user_id, version)
+            except StoreError as e:
+                if e.code == 404:
+                    raise SynapseError(404, "Version '%d' not found" % (version,))
+
+            if version_info.version != version:
+                raise RoomKeysVersionError(current_version=version_info.version)
+
         # XXX: perhaps we should use a finer grained lock here?
         with (yield self._upload_linearizer.queue(user_id)):

@@ -91,3 +104,27 @@ class E2eRoomKeysHandler(object):
                     yield self.store.set_e2e_room_key(
                         user_id, version, room_id, session_id, room_key
                     )
+
+    @defer.inlineCallbacks
+    def create_version(self, user_id, version, version_info):
+
+        # TODO: Validate the JSON to make sure it has the right keys.
+
+        # lock everyone out until we've switched version
+        with (yield self._upload_linearizer.queue(user_id)):
+            yield self.store.create_version(
+                user_id, version, version_info
+            )
+
+    @defer.inlineCallbacks
+    def get_version_info(self, user_id, version):
+        with (yield self._upload_linearizer.queue(user_id)):
+            results = yield self.store.get_e2e_room_key_version(
+                user_id, version
+            )
+            defer.returnValue(results)
+
+    @defer.inlineCallbacks
+    def delete_version(self, user_id, version):
+        with (yield self._upload_linearizer.queue(user_id)):
+            yield self.store.delete_e2e_room_key_version(user_id, version)
diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py
index be82eccb2b..4d76e1d824 100644
--- a/synapse/rest/client/v2_alpha/room_keys.py
+++ b/synapse/rest/client/v2_alpha/room_keys.py
@@ -221,5 +221,52 @@ class RoomKeysServlet(RestServlet):
         defer.returnValue((200, {}))
+
+class RoomKeysVersionServlet(RestServlet):
+    PATTERNS = client_v2_patterns(
+        "/room_keys/version(/(?P<version>[^/]+))?$"
+    )
+
+    def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer): server
+        """
+        super(RoomKeysVersionServlet, self).__init__()
+        self.auth = hs.get_auth()
+        self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler()
+
+    @defer.inlineCallbacks
+    def on_POST(self, request, version):
+        requester = yield self.auth.get_user_by_req(request, allow_guest=False)
+        user_id = requester.user.to_string()
+        info = parse_json_object_from_request(request)
+
+        new_version = yield self.e2e_room_keys_handler.create_version(
+            user_id, version, info
+        )
+        defer.returnValue((200, {"version": new_version}))
+
+    @defer.inlineCallbacks
+    def on_GET(self, request, version):
+        requester = yield self.auth.get_user_by_req(request, allow_guest=False)
+        user_id = requester.user.to_string()
+
+        info = yield self.e2e_room_keys_handler.get_version_info(
+            user_id, version
+        )
+        defer.returnValue((200, info))
+
+    @defer.inlineCallbacks
+    def on_DELETE(self, request, version):
+        requester = yield self.auth.get_user_by_req(request, allow_guest=False)
+        user_id = requester.user.to_string()
+
+        yield self.e2e_room_keys_handler.delete_version(
+            user_id, version
+        )
+        defer.returnValue((200, {}))
+
+
 def register_servlets(hs, http_server):
     RoomKeysServlet(hs).register(http_server)
+    RoomKeysVersionServlet(hs).register(http_server)
diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py
index 5982710bd5..994878acf6 100644
--- a/synapse/storage/e2e_room_keys.py
+++ b/synapse/storage/e2e_room_keys.py
@@ -170,3 +170,59 @@ class EndToEndRoomKeyStore(SQLBaseStore):
             keyvalues=keyvalues,
             desc="delete_e2e_room_keys",
         )
+
+    @defer.inlineCallbacks
+    def get_e2e_room_key_version(self, user_id, version):
+
+        row = yield
self._simple_select_one( + table="e2e_room_key_versions", + keyvalues={ + "user_id": user_id, + "version": version, + }, + retcols=( + "user_id", + "version", + "algorithm", + "auth_data", + ), + desc="get_e2e_room_key_version_info", + ) + + defer.returnValue(row) + + def create_e2e_room_key_version(self, user_id, version, info): + + def _create_e2e_room_key_version_txn(txn): + + self._simple_insert_txn( + txn, + table="e2e_room_key_versions", + values={ + "user_id": user_id, + "version": version, + "algorithm": info["algorithm"], + "auth_data": info["auth_data"], + }, + lock=False, + ) + + return True + + return self.runInteraction( + "create_e2e_room_key_version_txn", _create_e2e_room_key_version_txn + ) + + @defer.inlineCallbacks + def delete_e2e_room_key_version(self, user_id, version): + + keyvalues = { + "user_id": user_id, + "version": version, + } + + yield self._simple_delete( + table="e2e_room_key_versions", + keyvalues=keyvalues, + desc="delete_e2e_room_key_version", + ) diff --git a/synapse/storage/schema/delta/46/e2e_room_keys.sql b/synapse/storage/schema/delta/46/e2e_room_keys.sql index 6b344c5ad7..463f828c66 100644 --- a/synapse/storage/schema/delta/46/e2e_room_keys.sql +++ b/synapse/storage/schema/delta/46/e2e_room_keys.sql @@ -34,7 +34,7 @@ CREATE TABLE e2e_room_key_versions ( user_id TEXT NOT NULL, version INT NOT NULL, algorithm TEXT NOT NULL, - dummy_session_data TEXT NOT NULL + auth_data TEXT NOT NULL ); CREATE UNIQUE INDEX e2e_room_key_user_idx ON e2e_room_keys(user_id); -- cgit 1.5.1 From 69e51c7ba48a84b48ab64c8c290a232d14193a18 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 6 Dec 2017 10:02:49 +0100 Subject: make /room_keys/version work --- synapse/handlers/e2e_room_keys.py | 18 +++++++++++------- synapse/rest/client/v2_alpha/room_keys.py | 9 ++++++++- synapse/storage/e2e_room_keys.py | 22 +++++++++++++++++----- synapse/storage/schema/delta/46/e2e_room_keys.sql | 4 ++-- 4 files changed, 38 insertions(+), 15 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 4333ca610c..bd58be6558 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -48,13 +48,16 @@ class E2eRoomKeysHandler(object): # TODO: Validate the JSON to make sure it has the right keys. # Check that the version we're trying to upload is the current version + try: version_info = yield self.get_version_info(user_id, version) except StoreError as e: if e.code == 404: - raise SynapseError(404, "Version '%d' not found" % (version,)) + raise SynapseError(404, "Version '%s' not found" % (version,)) + else: + raise e - if version_info.version != version: + if version_info['version'] != version: raise RoomKeysVersionError(current_version=version_info.version) # XXX: perhaps we should use a finer grained lock here? @@ -81,7 +84,7 @@ class E2eRoomKeysHandler(object): if e.code == 404: pass else: - raise + raise e # check whether we merge or not. spelling it out with if/elifs rather # than lots of booleans for legibility. @@ -106,20 +109,21 @@ class E2eRoomKeysHandler(object): ) @defer.inlineCallbacks - def create_version(self, user_id, version, version_info): + def create_version(self, user_id, version_info): # TODO: Validate the JSON to make sure it has the right keys. 
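        # (Sketch only: the validation this TODO wants would check for the
        # two fields the storage layer reads, along the lines of
        #
        #     for field in ("algorithm", "auth_data"):
        #         if field not in version_info:
        #             raise SynapseError(400, "Missing '%s'" % (field,))
        # )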
# lock everyone out until we've switched version with (yield self._upload_linearizer.queue(user_id)): - yield self.store.create_version( - user_id, version, version_info + new_version = yield self.store.create_e2e_room_key_version( + user_id, version_info ) + defer.returnValue(new_version) @defer.inlineCallbacks def get_version_info(self, user_id, version): with (yield self._upload_linearizer.queue(user_id)): - results = yield self.store.get_e2e_room_key_version( + results = yield self.store.get_e2e_room_key_version_info( user_id, version ) defer.returnValue(results) diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 4d76e1d824..128b732fb1 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -17,6 +17,7 @@ import logging from twisted.internet import defer +from synapse.api.errors import SynapseError from synapse.http.servlet import ( RestServlet, parse_json_object_from_request ) @@ -237,15 +238,21 @@ class RoomKeysVersionServlet(RestServlet): @defer.inlineCallbacks def on_POST(self, request, version): + if version: + raise SynapseError(405, "Cannot POST to a specific version") + requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() info = parse_json_object_from_request(request) new_version = yield self.e2e_room_keys_handler.create_version( - user_id, version, info + user_id, info ) defer.returnValue((200, {"version": new_version})) + # we deliberately don't have a PUT /version, as these things really should + # be immutable to avoid people footgunning + @defer.inlineCallbacks def on_GET(self, request, version): requester = yield self.auth.get_user_by_req(request, allow_guest=False) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 994878acf6..8efca11a8c 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -172,7 +172,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): ) @defer.inlineCallbacks - def get_e2e_room_key_version(self, user_id, version): + def get_e2e_room_key_version_info(self, user_id, version): row = yield self._simple_select_one( table="e2e_room_key_versions", @@ -191,23 +191,35 @@ class EndToEndRoomKeyStore(SQLBaseStore): defer.returnValue(row) - def create_e2e_room_key_version(self, user_id, version, info): + def create_e2e_room_key_version(self, user_id, info): + """Atomically creates a new version of this user's e2e_room_keys store + with the given version info. 
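
        (For illustration of what "atomically creates" means here: the new
        version ID is allocated inside the transaction as MAX(version) + 1
        for this user, so the first backup a user creates gets version 1.)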
+ """ def _create_e2e_room_key_version_txn(txn): + txn.execute( + "SELECT MAX(version) FROM e2e_room_key_versions WHERE user_id=?", + (user_id,) + ) + current_version = txn.fetchone()[0] + if current_version is None: + current_version = 0 + + new_version = current_version + 1 + self._simple_insert_txn( txn, table="e2e_room_key_versions", values={ "user_id": user_id, - "version": version, + "version": new_version, "algorithm": info["algorithm"], "auth_data": info["auth_data"], }, - lock=False, ) - return True + return new_version return self.runInteraction( "create_e2e_room_key_version_txn", _create_e2e_room_key_version_txn diff --git a/synapse/storage/schema/delta/46/e2e_room_keys.sql b/synapse/storage/schema/delta/46/e2e_room_keys.sql index 463f828c66..0d2a85fbe6 100644 --- a/synapse/storage/schema/delta/46/e2e_room_keys.sql +++ b/synapse/storage/schema/delta/46/e2e_room_keys.sql @@ -18,7 +18,7 @@ CREATE TABLE e2e_room_keys ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, session_id TEXT NOT NULL, - version INT NOT NULL, + version TEXT NOT NULL, first_message_index INT, forwarded_count INT, is_verified BOOLEAN, @@ -32,7 +32,7 @@ CREATE UNIQUE INDEX e2e_room_keys_session_idx ON e2e_room_keys(session_id); -- the metadata for each generation of encrypted e2e session backups CREATE TABLE e2e_room_key_versions ( user_id TEXT NOT NULL, - version INT NOT NULL, + version TEXT NOT NULL, algorithm TEXT NOT NULL, auth_data TEXT NOT NULL ); -- cgit 1.5.1 From 0abb205b47158a4160ddceb317c0245d640b6e3f Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Mon, 18 Dec 2017 01:52:46 +0000 Subject: blindly incorporate PR review - needs testing & fixing --- synapse/api/errors.py | 11 ++- synapse/handlers/e2e_room_keys.py | 88 +++++++++++++++-------- synapse/rest/client/v2_alpha/room_keys.py | 2 + synapse/storage/e2e_room_keys.py | 69 ++++++++---------- synapse/storage/schema/delta/46/e2e_room_keys.sql | 8 +-- 5 files changed, 99 insertions(+), 79 deletions(-) (limited to 'synapse') diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 8c97e91ba1..d37bcb4082 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -289,9 +289,14 @@ class LimitExceededError(SynapseError): class RoomKeysVersionError(SynapseError): """A client has tried to upload to a non-current version of the room_keys store """ - def __init__(self, code=403, msg="Wrong room_keys version", current_version=None, - errcode=Codes.WRONG_ROOM_KEYS_VERSION): - super(RoomKeysVersionError, self).__init__(code, msg, errcode) + def __init__(self, current_version): + """ + Args: + current_version (str): the current version of the store they should have used + """ + super(RoomKeysVersionError, self).__init__( + 403, "Wrong room_keys version", Codes.WRONG_ROOM_KEYS_VERSION + ) self.current_version = current_version def error_dict(self): diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index bd58be6558..dda31fdd24 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -24,8 +24,21 @@ logger = logging.getLogger(__name__) class E2eRoomKeysHandler(object): + """ + Implements an optional realtime backup mechanism for encrypted E2E megolm room keys. + This gives a way for users to store and recover their megolm keys if they lose all + their clients. It should also extend easily to future room key mechanisms. + The actual payload of the encrypted keys is completely opaque to the handler. 
+ """ + def __init__(self, hs): self.store = hs.get_datastore() + + # Used to lock whenever a client is uploading key data. This prevents collisions + # between clients trying to upload the details of a new session, given all + # clients belonging to a user will receive and try to upload a new session at + # roughly the same time. Also used to lock out uploads when the key is being + # changed. self._upload_linearizer = Linearizer("upload_room_keys_lock") @defer.inlineCallbacks @@ -40,33 +53,34 @@ class E2eRoomKeysHandler(object): @defer.inlineCallbacks def delete_room_keys(self, user_id, version, room_id, session_id): - yield self.store.delete_e2e_room_keys(user_id, version, room_id, session_id) + # lock for consistency with uploading + with (yield self._upload_linearizer.queue(user_id)): + yield self.store.delete_e2e_room_keys(user_id, version, room_id, session_id) @defer.inlineCallbacks def upload_room_keys(self, user_id, version, room_keys): # TODO: Validate the JSON to make sure it has the right keys. - # Check that the version we're trying to upload is the current version - - try: - version_info = yield self.get_version_info(user_id, version) - except StoreError as e: - if e.code == 404: - raise SynapseError(404, "Version '%s' not found" % (version,)) - else: - raise e - - if version_info['version'] != version: - raise RoomKeysVersionError(current_version=version_info.version) - # XXX: perhaps we should use a finer grained lock here? with (yield self._upload_linearizer.queue(user_id)): - - # go through the room_keys - for room_id in room_keys['rooms']: - for session_id in room_keys['rooms'][room_id]['sessions']: - room_key = room_keys['rooms'][room_id]['sessions'][session_id] + # Check that the version we're trying to upload is the current version + try: + version_info = yield self.get_version_info(user_id, version) + except StoreError as e: + if e.code == 404: + raise SynapseError(404, "Version '%s' not found" % (version,)) + else: + raise e + + if version_info['version'] != version: + raise RoomKeysVersionError(current_version=version_info.version) + + # go through the room_keys. + # XXX: this should/could be done concurrently, given we're in a lock. + for room_id, room in room_keys['rooms'].iteritems(): + for session_id, session in room['sessions'].iteritems(): + room_key = session[session_id] yield self._upload_room_key( user_id, version, room_id, session_id, room_key @@ -86,10 +100,29 @@ class E2eRoomKeysHandler(object): else: raise e - # check whether we merge or not. spelling it out with if/elifs rather - # than lots of booleans for legibility. - upsert = True + if _should_replace_room_key(current_room_key, room_key): + yield self.store.set_e2e_room_key( + user_id, version, room_id, session_id, room_key + ) + + def _should_replace_room_key(current_room_key, room_key): + """ + Determine whether to replace the current_room_key in our backup for this + session (if any) with a new room_key that has been uploaded. + + Args: + current_room_key (dict): Optional, the current room_key dict if any + room_key (dict): The new room_key dict which may or may not be fit to + replace the current_room_key + + Returns: + True if current_room_key should be replaced by room_key in the backup + """ + if current_room_key: + # spelt out with if/elifs rather than nested boolean expressions + # purely for legibility. 
+ if room_key['is_verified'] and not current_room_key['is_verified']: pass elif ( @@ -97,16 +130,11 @@ class E2eRoomKeysHandler(object): current_room_key['first_message_index'] ): pass - elif room_key['forwarded_count'] < room_key['forwarded_count']: + elif room_key['forwarded_count'] < current_room_key['forwarded_count']: pass else: - upsert = False - - # if so, we set the new room_key - if upsert: - yield self.store.set_e2e_room_key( - user_id, version, room_id, session_id, room_key - ) + return False + return True @defer.inlineCallbacks def create_version(self, user_id, version_info): diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 128b732fb1..70b7b4573f 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -68,6 +68,8 @@ class RoomKeysServlet(RestServlet): * lower forwarded_count always wins over higher forwarded_count We trust the clients not to lie and corrupt their own backups. + It also means that if your access_token is stolen, the attacker could + delete your backup. POST /room_keys/keys/!abc:matrix.org/c0ff33?version=1 HTTP/1.1 Content-Type: application/json diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 8efca11a8c..c11417c415 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -44,30 +44,21 @@ class EndToEndRoomKeyStore(SQLBaseStore): def set_e2e_room_key(self, user_id, version, room_id, session_id, room_key): - def _set_e2e_room_key_txn(txn): - - self._simple_upsert_txn( - txn, - table="e2e_room_keys", - keyvalues={ - "user_id": user_id, - "room_id": room_id, - "session_id": session_id, - }, - values={ - "version": version, - "first_message_index": room_key['first_message_index'], - "forwarded_count": room_key['forwarded_count'], - "is_verified": room_key['is_verified'], - "session_data": room_key['session_data'], - }, - lock=False, - ) - - return True - - return self.runInteraction( - "set_e2e_room_key", _set_e2e_room_key_txn + yield self._simple_upsert( + table="e2e_room_keys", + keyvalues={ + "user_id": user_id, + "room_id": room_id, + "session_id": session_id, + }, + values={ + "version": version, + "first_message_index": room_key['first_message_index'], + "forwarded_count": room_key['forwarded_count'], + "is_verified": room_key['is_verified'], + "session_data": room_key['session_data'], + }, + lock=False, ) # XXX: this isn't currently used and isn't tested anywhere @@ -107,7 +98,9 @@ class EndToEndRoomKeyStore(SQLBaseStore): ) @defer.inlineCallbacks - def get_e2e_room_keys(self, user_id, version, room_id, session_id): + def get_e2e_room_keys( + self, user_id, version, room_id=room_id, session_id=session_id + ): keyvalues = { "user_id": user_id, @@ -115,8 +108,8 @@ class EndToEndRoomKeyStore(SQLBaseStore): } if room_id: keyvalues['room_id'] = room_id - if session_id: - keyvalues['session_id'] = session_id + if session_id: + keyvalues['session_id'] = session_id rows = yield self._simple_select_list( table="e2e_room_keys", @@ -133,18 +126,10 @@ class EndToEndRoomKeyStore(SQLBaseStore): desc="get_e2e_room_keys", ) - # perlesque autovivification from https://stackoverflow.com/a/19829714/6764493 - class AutoVivification(dict): - def __getitem__(self, item): - try: - return dict.__getitem__(self, item) - except KeyError: - value = self[item] = type(self)() - return value - - sessions = AutoVivification() + sessions = {} for row in rows: - 
sessions['rooms'][row['room_id']]['sessions'][row['session_id']] = { + room_entry = sessions['rooms'].setdefault(row['room_id'], {"sessions": {}}) + room_entry['sessions'][row['session_id']] = { "first_message_index": row["first_message_index"], "forwarded_count": row["forwarded_count"], "is_verified": row["is_verified"], @@ -154,7 +139,9 @@ class EndToEndRoomKeyStore(SQLBaseStore): defer.returnValue(sessions) @defer.inlineCallbacks - def delete_e2e_room_keys(self, user_id, version, room_id, session_id): + def delete_e2e_room_keys( + self, user_id, version, room_id=room_id, session_id=session_id + ): keyvalues = { "user_id": user_id, @@ -162,8 +149,8 @@ class EndToEndRoomKeyStore(SQLBaseStore): } if room_id: keyvalues['room_id'] = room_id - if session_id: - keyvalues['session_id'] = session_id + if session_id: + keyvalues['session_id'] = session_id yield self._simple_delete( table="e2e_room_keys", diff --git a/synapse/storage/schema/delta/46/e2e_room_keys.sql b/synapse/storage/schema/delta/46/e2e_room_keys.sql index 0d2a85fbe6..16499ac34c 100644 --- a/synapse/storage/schema/delta/46/e2e_room_keys.sql +++ b/synapse/storage/schema/delta/46/e2e_room_keys.sql @@ -25,16 +25,14 @@ CREATE TABLE e2e_room_keys ( session_data TEXT NOT NULL ); -CREATE UNIQUE INDEX e2e_room_keys_user_idx ON e2e_room_keys(user_id); -CREATE UNIQUE INDEX e2e_room_keys_room_idx ON e2e_room_keys(room_id); -CREATE UNIQUE INDEX e2e_room_keys_session_idx ON e2e_room_keys(session_id); +CREATE UNIQUE INDEX e2e_room_keys_idx ON e2e_room_keys(user_id, room_id, session_id); -- the metadata for each generation of encrypted e2e session backups -CREATE TABLE e2e_room_key_versions ( +CREATE TABLE e2e_room_keys_versions ( user_id TEXT NOT NULL, version TEXT NOT NULL, algorithm TEXT NOT NULL, auth_data TEXT NOT NULL ); -CREATE UNIQUE INDEX e2e_room_key_user_idx ON e2e_room_keys(user_id); +CREATE UNIQUE INDEX e2e_room_keys_versions_user_idx ON e2e_room_keys_versions(user_id); -- cgit 1.5.1 From cac02537998718c05a561918269745161378dd6f Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Mon, 18 Dec 2017 01:58:53 +0000 Subject: rename room_key_version table correctly, and fix opt args --- synapse/handlers/e2e_room_keys.py | 6 +++--- synapse/storage/e2e_room_keys.py | 26 +++++++++++++------------- 2 files changed, 16 insertions(+), 16 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index dda31fdd24..87be081b1c 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -143,7 +143,7 @@ class E2eRoomKeysHandler(object): # lock everyone out until we've switched version with (yield self._upload_linearizer.queue(user_id)): - new_version = yield self.store.create_e2e_room_key_version( + new_version = yield self.store.create_e2e_room_keys_version( user_id, version_info ) defer.returnValue(new_version) @@ -151,7 +151,7 @@ class E2eRoomKeysHandler(object): @defer.inlineCallbacks def get_version_info(self, user_id, version): with (yield self._upload_linearizer.queue(user_id)): - results = yield self.store.get_e2e_room_key_version_info( + results = yield self.store.get_e2e_room_keys_version_info( user_id, version ) defer.returnValue(results) @@ -159,4 +159,4 @@ class E2eRoomKeysHandler(object): @defer.inlineCallbacks def delete_version(self, user_id, version): with (yield self._upload_linearizer.queue(user_id)): - yield self.store.delete_e2e_room_key_version(user_id, version) + yield self.store.delete_e2e_room_keys_version(user_id, version) diff 
--git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index c11417c415..7e1cb13e74 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -99,7 +99,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): @defer.inlineCallbacks def get_e2e_room_keys( - self, user_id, version, room_id=room_id, session_id=session_id + self, user_id, version, room_id=None, session_id=None ): keyvalues = { @@ -140,7 +140,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): @defer.inlineCallbacks def delete_e2e_room_keys( - self, user_id, version, room_id=room_id, session_id=session_id + self, user_id, version, room_id=None, session_id=None ): keyvalues = { @@ -159,10 +159,10 @@ class EndToEndRoomKeyStore(SQLBaseStore): ) @defer.inlineCallbacks - def get_e2e_room_key_version_info(self, user_id, version): + def get_e2e_room_keys_version_info(self, user_id, version): row = yield self._simple_select_one( - table="e2e_room_key_versions", + table="e2e_room_keys_versions", keyvalues={ "user_id": user_id, "version": version, @@ -173,20 +173,20 @@ class EndToEndRoomKeyStore(SQLBaseStore): "algorithm", "auth_data", ), - desc="get_e2e_room_key_version_info", + desc="get_e2e_room_keys_version_info", ) defer.returnValue(row) - def create_e2e_room_key_version(self, user_id, info): + def create_e2e_room_keys_version(self, user_id, info): """Atomically creates a new version of this user's e2e_room_keys store with the given version info. """ - def _create_e2e_room_key_version_txn(txn): + def _create_e2e_room_keys_version_txn(txn): txn.execute( - "SELECT MAX(version) FROM e2e_room_key_versions WHERE user_id=?", + "SELECT MAX(version) FROM e2e_room_keys_versions WHERE user_id=?", (user_id,) ) current_version = txn.fetchone()[0] @@ -197,7 +197,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): self._simple_insert_txn( txn, - table="e2e_room_key_versions", + table="e2e_room_keys_versions", values={ "user_id": user_id, "version": new_version, @@ -209,11 +209,11 @@ class EndToEndRoomKeyStore(SQLBaseStore): return new_version return self.runInteraction( - "create_e2e_room_key_version_txn", _create_e2e_room_key_version_txn + "create_e2e_room_keys_version_txn", _create_e2e_room_keys_version_txn ) @defer.inlineCallbacks - def delete_e2e_room_key_version(self, user_id, version): + def delete_e2e_room_keys_version(self, user_id, version): keyvalues = { "user_id": user_id, @@ -221,7 +221,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): } yield self._simple_delete( - table="e2e_room_key_versions", + table="e2e_room_keys_versions", keyvalues=keyvalues, - desc="delete_e2e_room_key_version", + desc="delete_e2e_room_keys_version", ) -- cgit 1.5.1 From ca0b052307de8868d3e337f1ace5667dad740ab1 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 24 Dec 2017 15:03:44 +0000 Subject: fix factoring out of _should_replace_room_key --- synapse/handlers/e2e_room_keys.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 87be081b1c..b67d6a2a7e 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -100,15 +100,16 @@ class E2eRoomKeysHandler(object): else: raise e - if _should_replace_room_key(current_room_key, room_key): + if E2eRoomKeysHandler._should_replace_room_key(current_room_key, room_key): yield self.store.set_e2e_room_key( user_id, version, room_id, session_id, room_key ) + @staticmethod def _should_replace_room_key(current_room_key, room_key): """ - 
Determine whether to replace the current_room_key in our backup for this - session (if any) with a new room_key that has been uploaded. + Determine whether to replace a given current_room_key (if any) + with a newly uploaded room_key backup Args: current_room_key (dict): Optional, the current room_key dict if any -- cgit 1.5.1 From 8d14598e90396c52c9394cf807861921e053d8da Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 24 Dec 2017 16:44:18 +0000 Subject: add storage docstring; remove unused set_e2e_room_keys --- synapse/storage/e2e_room_keys.py | 119 +++++++++++++++++++++++++++------------ 1 file changed, 83 insertions(+), 36 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 7e1cb13e74..45d2c9b433 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -22,6 +22,22 @@ class EndToEndRoomKeyStore(SQLBaseStore): @defer.inlineCallbacks def get_e2e_room_key(self, user_id, version, room_id, session_id): + """Get the encrypted E2E room key for a given session from a given + backup version of room_keys. We only store the 'best' room key for a given + session at a given time, as determined by the handler. + + Args: + user_id(str): the user whose backup we're querying + version(str): the version ID of the backup for the set of keys we're querying + room_id(str): the ID of the room whose keys we're querying. + This is a bit redundant as it's implied by the session_id, but + we include for consistency with the rest of the API. + session_id(str): the session whose room_key we're querying. + + Returns: + A deferred dict giving the session_data and message metadata for + this room key. + """ row = yield self._simple_select_one( table="e2e_room_keys", @@ -43,6 +59,17 @@ class EndToEndRoomKeyStore(SQLBaseStore): defer.returnValue(row) def set_e2e_room_key(self, user_id, version, room_id, session_id, room_key): + """Replaces or inserts the encrypted E2E room key for a given session in + a given backup + + Args: + user_id(str): the user whose backup we're setting + version(str): the version ID of the backup we're updating + room_id(str): the ID of the room whose keys we're setting + session_id(str): the session whose room_key we're setting + Raises: + StoreError if stuff goes wrong, probably + """ yield self._simple_upsert( table="e2e_room_keys", @@ -61,46 +88,27 @@ class EndToEndRoomKeyStore(SQLBaseStore): lock=False, ) - # XXX: this isn't currently used and isn't tested anywhere - # it could be used in future for bulk-uploading new versions of room_keys - # for a user or something though. 
- def set_e2e_room_keys(self, user_id, version, room_keys): - - def _set_e2e_room_keys_txn(txn): - - values = [] - for room_id in room_keys['rooms']: - for session_id in room_keys['rooms'][room_id]['sessions']: - session = room_keys['rooms'][room_id]['sessions'][session_id] - values.append( - { - "user_id": user_id, - "room_id": room_id, - "session_id": session_id, - "version": version, - "first_message_index": session['first_message_index'], - "forwarded_count": session['forwarded_count'], - "is_verified": session['is_verified'], - "session_data": session['session_data'], - } - ) - - self._simple_insert_many_txn( - txn, - table="e2e_room_keys", - values=values - ) - - return True - - return self.runInteraction( - "set_e2e_room_keys", _set_e2e_room_keys_txn - ) - @defer.inlineCallbacks def get_e2e_room_keys( self, user_id, version, room_id=None, session_id=None ): + """Bulk get the E2E room keys for a given backup, optionally filtered to a given + room, or a given session. + + Args: + user_id(str): the user whose backup we're querying + version(str): the version ID of the backup for the set of keys we're querying + room_id(str): Optional. the ID of the room whose keys we're querying, if any. + If not specified, we return the keys for all the rooms in the backup. + session_id(str): Optional. the session whose room_key we're querying, if any. + If specified, we also require the room_id to be specified. + If not specified, we return all the keys in this version of + the backup (or for the specified room) + + Returns: + A deferred list of dicts giving the session_data and message metadata for + these room keys. + """ keyvalues = { "user_id": user_id, @@ -142,6 +150,22 @@ class EndToEndRoomKeyStore(SQLBaseStore): def delete_e2e_room_keys( self, user_id, version, room_id=None, session_id=None ): + """Bulk delete the E2E room keys for a given backup, optionally filtered to a given + room or a given session. + + Args: + user_id(str): the user whose backup we're deleting from + version(str): the version ID of the backup for the set of keys we're deleting + room_id(str): Optional. the ID of the room whose keys we're deleting, if any. + If not specified, we delete the keys for all the rooms in the backup. + session_id(str): Optional. the session whose room_key we're querying, if any. + If specified, we also require the room_id to be specified. + If not specified, we delete all the keys in this version of + the backup (or for the specified room) + + Returns: + A deferred of the deletion transaction + """ keyvalues = { "user_id": user_id, @@ -160,6 +184,15 @@ class EndToEndRoomKeyStore(SQLBaseStore): @defer.inlineCallbacks def get_e2e_room_keys_version_info(self, user_id, version): + """Get info etadata about a given version of our room_keys backup + + Args: + user_id(str): the user whose backup we're querying + version(str): the version ID of the backup we're querying about + + Returns: + A deferred dict giving the info metadata for this backup version + """ row = yield self._simple_select_one( table="e2e_room_keys_versions", @@ -181,6 +214,13 @@ class EndToEndRoomKeyStore(SQLBaseStore): def create_e2e_room_keys_version(self, user_id, info): """Atomically creates a new version of this user's e2e_room_keys store with the given version info. 
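
        For illustration, info is the client-supplied version metadata, e.g.
        (values borrowed from the servlet docstrings):

            {
                "algorithm": "m.megolm_backup.v1",
                "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K"
            }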
+
+        Args:
+            user_id(str): the user whose backup we're creating a version
+            info(dict): the info about the backup version to be created
+
+        Returns:
+            A deferred string for the newly created version ID
         """

         def _create_e2e_room_keys_version_txn(txn):
@@ -214,6 +254,13 @@ class EndToEndRoomKeyStore(SQLBaseStore):

     @defer.inlineCallbacks
     def delete_e2e_room_keys_version(self, user_id, version):
+        """Delete a given backup version of the user's room keys.
+        Doesn't delete their actual key data.
+
+        Args:
+            user_id(str): the user whose backup version we're deleting
+            version(str): the ID of the backup version we're deleting
+        """

         keyvalues = {
             "user_id": user_id,
-- cgit 1.5.1


From 9f500cb39efa608c02f212711a5ad2177757bf4b Mon Sep 17 00:00:00 2001
From: Matthew Hodgson
Date: Sun, 24 Dec 2017 17:42:17 +0000
Subject: more docstring for the e2e_room_keys rest

---
 synapse/handlers/e2e_room_keys.py         |  2 --
 synapse/rest/client/v2_alpha/room_keys.py | 51 +++++++++++++++++++++++++++++++
 2 files changed, 51 insertions(+), 2 deletions(-)

(limited to 'synapse')

diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py
index b67d6a2a7e..7a940d1c21 100644
--- a/synapse/handlers/e2e_room_keys.py
+++ b/synapse/handlers/e2e_room_keys.py
@@ -59,7 +59,6 @@ class E2eRoomKeysHandler(object):

     @defer.inlineCallbacks
     def upload_room_keys(self, user_id, version, room_keys):
-
         # TODO: Validate the JSON to make sure it has the right keys.

         # XXX: perhaps we should use a finer grained lock here?
@@ -139,7 +138,6 @@ class E2eRoomKeysHandler(object):

     @defer.inlineCallbacks
     def create_version(self, user_id, version_info):
-
         # TODO: Validate the JSON to make sure it has the right keys.

         # lock everyone out until we've switched version
diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py
index 04547c7d43..d3f857aba2 100644
--- a/synapse/rest/client/v2_alpha/room_keys.py
+++ b/synapse/rest/client/v2_alpha/room_keys.py
@@ -208,6 +208,10 @@ class RoomKeysServlet(RestServlet):
         """
         Deletes one or more encrypted E2E room keys for a user for backup purposes.

+        DELETE /room_keys/keys/!abc:matrix.org/c0ff33?version=1
+        HTTP/1.1 200 OK
+        {}
+
         room_id: the ID of the room whose keys to delete (optional)
         session_id: the ID for the E2E session to delete (optional)
         version: the version of the user's backup which this data is for.
@@ -240,6 +244,33 @@ class RoomKeysVersionServlet(RestServlet):

     @defer.inlineCallbacks
     def on_POST(self, request, version):
+        """
+        Create a new backup version for this user's room_keys with the given
+        info.  The version is allocated by the server and returned to the user
+        in the response.  This API is intended to be used whenever the user
+        changes the encryption key for their backups, ensuring that backups
+        encrypted with different keys don't collide.
+
+        The algorithm passed in the version info is a reverse-DNS namespaced
+        identifier to describe the format of the encrypted backupped keys.
+
+        The auth_data is { user_id: "user_id", nonce: <random string> }
+        encrypted using the algorithm and current encryption key described above.
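
        (Sketching the intended client flow: a client POSTs here once per new
        backup encryption key, remembers the version ID it gets back, and
        passes it as the ?version= query parameter on each later
        /room_keys/keys request handled by RoomKeysServlet above.)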
+ + POST /room_keys/version + Content-Type: application/json + { + "algorithm": "m.megolm_backup.v1", + "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K" + } + + HTTP/1.1 200 OK + Content-Type: application/json + { + "version": 12345 + } + """ + if version: raise SynapseError(405, "Cannot POST to a specific version") @@ -257,6 +288,17 @@ class RoomKeysVersionServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request, version): + """ + Retrieve the version information about a given version of the user's + room_keys backup. + + GET /room_keys/version/12345 HTTP/1.1 + { + "algorithm": "m.megolm_backup.v1", + "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K" + } + """ + requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() @@ -267,6 +309,15 @@ class RoomKeysVersionServlet(RestServlet): @defer.inlineCallbacks def on_DELETE(self, request, version): + """ + Delete the information about a given version of the user's + room_keys backup. Doesn't delete the actual room data. + + DELETE /room_keys/version/12345 HTTP/1.1 + HTTP/1.1 200 OK + {} + """ + requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() -- cgit 1.5.1 From 9f0791b7bd030a70182cd9b33bbe2f78dba705dd Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 27 Dec 2017 23:35:10 +0000 Subject: add a tonne of docstring; make upload_room_keys properly assert version --- synapse/storage/e2e_room_keys.py | 51 ++++++++++++++++++++++++---------------- 1 file changed, 31 insertions(+), 20 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 45d2c9b433..e04e6a3690 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -67,6 +67,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): version(str): the version ID of the backup we're updating room_id(str): the ID of the room whose keys we're setting session_id(str): the session whose room_key we're setting + room_key(dict): the room_key being set Raises: StoreError if stuff goes wrong, probably """ @@ -182,35 +183,46 @@ class EndToEndRoomKeyStore(SQLBaseStore): desc="delete_e2e_room_keys", ) - @defer.inlineCallbacks - def get_e2e_room_keys_version_info(self, user_id, version): - """Get info etadata about a given version of our room_keys backup + def get_e2e_room_keys_version_info(self, user_id, version=None): + """Get info metadata about a version of our room_keys backup. Args: user_id(str): the user whose backup we're querying - version(str): the version ID of the backup we're querying about - + version(str): Optional. the version ID of the backup we're querying about + If missing, we return the information about the current version. 
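                ("Current" here simply means the row with the highest version
                number for this user - see the MAX(version) lookup in the
                transaction below.)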
+ Raises: + StoreError: with code 404 if there are no e2e_room_keys_versions present Returns: A deferred dict giving the info metadata for this backup version """ - row = yield self._simple_select_one( - table="e2e_room_keys_versions", - keyvalues={ - "user_id": user_id, - "version": version, - }, - retcols=( - "user_id", - "version", - "algorithm", - "auth_data", - ), + def _get_e2e_room_keys_version_info_txn(txn): + if version is None: + txn.execute( + "SELECT MAX(version) FROM e2e_room_keys_versions WHERE user_id=?", + (user_id,) + ) + version = txn.fetchone()[0] + + return self._simple_select_one_txn( + table="e2e_room_keys_versions", + keyvalues={ + "user_id": user_id, + "version": version, + }, + retcols=( + "user_id", + "version", + "algorithm", + "auth_data", + ), + ) + + return self.runInteraction( desc="get_e2e_room_keys_version_info", + _get_e2e_room_keys_version_info_txn ) - defer.returnValue(row) - def create_e2e_room_keys_version(self, user_id, info): """Atomically creates a new version of this user's e2e_room_keys store with the given version info. @@ -224,7 +236,6 @@ class EndToEndRoomKeyStore(SQLBaseStore): """ def _create_e2e_room_keys_version_txn(txn): - txn.execute( "SELECT MAX(version) FROM e2e_room_keys_versions WHERE user_id=?", (user_id,) -- cgit 1.5.1 From 14b3da63a339333292a83410c0ba3148bcb644ba Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 27 Dec 2017 23:37:44 +0000 Subject: add a tonne of docstring; make upload_room_keys properly assert version --- synapse/handlers/e2e_room_keys.py | 111 +++++++++++++++++++++++++++--- synapse/rest/client/v2_alpha/room_keys.py | 11 ++- 2 files changed, 113 insertions(+), 9 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 7a940d1c21..2fa025bfc7 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -42,7 +42,16 @@ class E2eRoomKeysHandler(object): self._upload_linearizer = Linearizer("upload_room_keys_lock") @defer.inlineCallbacks - def get_room_keys(self, user_id, version, room_id, session_id): + def get_room_keys(self, user_id, version, room_id=None, session_id=None): + """Bulk get the E2E room keys for a given backup, optionally filtered to a given + room, or a given session. + See EndToEndRoomKeyStore.get_e2e_room_keys for full details. + + Returns: + A deferred list of dicts giving the session_data and message metadata for + these room keys. + """ + # we deliberately take the lock to get keys so that changing the version # works atomically with (yield self._upload_linearizer.queue(user_id)): @@ -52,20 +61,56 @@ class E2eRoomKeysHandler(object): defer.returnValue(results) @defer.inlineCallbacks - def delete_room_keys(self, user_id, version, room_id, session_id): + def delete_room_keys(self, user_id, version, room_id=None, session_id=None): + """Bulk delete the E2E room keys for a given backup, optionally filtered to a given + room or a given session. + See EndToEndRoomKeyStore.delete_e2e_room_keys for full details. + + Returns: + A deferred of the deletion transaction + """ + # lock for consistency with uploading with (yield self._upload_linearizer.queue(user_id)): yield self.store.delete_e2e_room_keys(user_id, version, room_id, session_id) @defer.inlineCallbacks def upload_room_keys(self, user_id, version, room_keys): + """Bulk upload a list of room keys into a given backup version, asserting + that the given version is the current backup version. 
room_keys are merged + into the current backup as described in RoomKeysServlet.on_PUT(). + + Args: + user_id(str): the user whose backup we're setting + version(str): the version ID of the backup we're updating + room_keys(dict): a nested dict describing the room_keys we're setting: + + { + "rooms": { + "!abc:matrix.org": { + "sessions": { + "c0ff33": { + "first_message_index": 1, + "forwarded_count": 1, + "is_verified": false, + "session_data": "SSBBTSBBIEZJU0gK" + } + } + } + } + } + + Raises: + SynapseError: with code 404 if there are no versions defined + RoomKeysVersionError: if the uploaded version is not the current version + """ + # TODO: Validate the JSON to make sure it has the right keys. # XXX: perhaps we should use a finer grained lock here? with (yield self._upload_linearizer.queue(user_id)): # Check that the version we're trying to upload is the current version - try: - version_info = yield self.get_version_info(user_id, version) + version_info = yield self.get_current_version_info(user_id) except StoreError as e: if e.code == 404: raise SynapseError(404, "Version '%s' not found" % (version,)) @@ -87,6 +132,17 @@ class E2eRoomKeysHandler(object): @defer.inlineCallbacks def _upload_room_key(self, user_id, version, room_id, session_id, room_key): + """Upload a given room_key for a given room and session into a given + version of the backup. Merges the key with any which might already exist. + + Args: + user_id(str): the user whose backup we're setting + version(str): the version ID of the backup we're updating + room_id(str): the ID of the room whose keys we're setting + session_id(str): the session whose room_key we're setting + room_key(dict): the room_key being set + """ + # get the room_key for this particular row current_room_key = None try: @@ -138,6 +194,23 @@ class E2eRoomKeysHandler(object): @defer.inlineCallbacks def create_version(self, user_id, version_info): + """Create a new backup version. This automatically becomes the new + backup version for the user's keys; previous backups will no longer be + writeable to. + + Args: + user_id(str): the user whose backup version we're creating + version_info(dict): metadata about the new version being created + + { + "algorithm": "m.megolm_backup.v1", + "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K" + } + + Returns: + A deferred of a string that gives the new version number. + """ + # TODO: Validate the JSON to make sure it has the right keys. # lock everyone out until we've switched version @@ -148,14 +221,36 @@ class E2eRoomKeysHandler(object): defer.returnValue(new_version) @defer.inlineCallbacks - def get_version_info(self, user_id, version): + def get_current_version_info(self, user_id): + """Get the user's current backup version. + + Args: + user_id(str): the user whose current backup version we're querying + Raises: + StoreError: code 404 if there is no current backup version + Returns: + A deferred of a info dict that gives the info about the new version. 
+ + { + "algorithm": "m.megolm_backup.v1", + "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K" + } + """ + with (yield self._upload_linearizer.queue(user_id)): - results = yield self.store.get_e2e_room_keys_version_info( - user_id, version - ) + results = yield self.store.get_e2e_room_keys_version_info(user_id) defer.returnValue(results) @defer.inlineCallbacks def delete_version(self, user_id, version): + """Deletes a given version of the user's e2e_room_keys backup + + Args: + user_id(str): the user whose current backup version we're deleting + version(str): the version id of the backup being deleted + Raises: + StoreError: code 404 if this backup version doesn't exist + """ + with (yield self._upload_linearizer.queue(user_id)): yield self.store.delete_e2e_room_keys_version(user_id, version) diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 04547c7d43..d3f857aba2 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -47,7 +47,7 @@ class RoomKeysServlet(RestServlet): room_id: the ID of the room the keys are for (optional) session_id: the ID for the E2E room keys for the room (optional) version: the version of the user's backup which this data is for. - the version must already have been created via the /change_secret API. + the version must already have been created via the /room_keys/version API. Each session has: * first_message_index: a numeric index indicating the oldest message @@ -59,6 +59,9 @@ class RoomKeysServlet(RestServlet): * session_data: base64-encrypted data describing the session. Returns 200 OK on success with body {} + Returns 403 Forbidden if the version in question is not the most recently + created version (i.e. if this is an old client trying to write to a stale backup) + Returns 404 Not Found if the version in question doesn't exist The API is designed to be otherwise agnostic to the room_key encryption algorithm being used. Sessions are merged with existing ones in the @@ -251,6 +254,9 @@ class RoomKeysVersionServlet(RestServlet): changes the encryption key for their backups, ensuring that backups encrypted with different keys don't collide. + It takes out an exclusive lock on this user's room_key backups, to ensure + clients only upload to the current backup. + The algorithm passed in the version info is a reverse-DNS namespaced identifier to describe the format of the encrypted backupped keys. @@ -292,6 +298,9 @@ class RoomKeysVersionServlet(RestServlet): Retrieve the version information about a given version of the user's room_keys backup. + It takes out an exclusive lock on this user's room_key backups, to ensure + clients only upload to the current backup. + GET /room_keys/version/12345 HTTP/1.1 { "algorithm": "m.megolm_backup.v1", -- cgit 1.5.1 From 234611f3472b229d9e3a9ec7a27c51446f6a61ba Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 27 Dec 2017 23:42:08 +0000 Subject: fix typos --- synapse/handlers/e2e_room_keys.py | 3 ++- synapse/storage/e2e_room_keys.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 2fa025bfc7..6446c3c6c3 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -110,7 +110,8 @@ class E2eRoomKeysHandler(object): # XXX: perhaps we should use a finer grained lock here? 
with (yield self._upload_linearizer.queue(user_id)): # Check that the version we're trying to upload is the current version - version_info = yield self.get_current_version_info(user_id) + try: + version_info = yield self.get_current_version_info(user_id) except StoreError as e: if e.code == 404: raise SynapseError(404, "Version '%s' not found" % (version,)) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index e04e6a3690..b51faa1204 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -219,7 +219,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): ) return self.runInteraction( - desc="get_e2e_room_keys_version_info", + "get_e2e_room_keys_version_info", _get_e2e_room_keys_version_info_txn ) -- cgit 1.5.1 From 982edca38026e2bb9085e77b4c25af60c4793f34 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 27 Dec 2017 23:58:51 +0000 Subject: fix flakes --- synapse/handlers/e2e_room_keys.py | 4 ++-- synapse/storage/e2e_room_keys.py | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 6446c3c6c3..fdae69c600 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -136,7 +136,7 @@ class E2eRoomKeysHandler(object): """Upload a given room_key for a given room and session into a given version of the backup. Merges the key with any which might already exist. - Args: + Args: user_id(str): the user whose backup we're setting version(str): the version ID of the backup we're updating room_id(str): the ID of the room whose keys we're setting @@ -199,7 +199,7 @@ class E2eRoomKeysHandler(object): backup version for the user's keys; previous backups will no longer be writeable to. 
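Every operation in this handler is serialised per user through the Linearizer created in the constructor, which is what makes version changes atomic with respect to uploads and reads. A rough stand-in for that pattern built from Twisted's stock DeferredLock; unlike the real Linearizer it never evicts idle locks, so treat it as a sketch of the idea rather than a drop-in replacement:

```python
from twisted.internet import defer

class PerUserLock(object):
    """Run callables one at a time per user_id."""

    def __init__(self):
        self._locks = {}  # user_id -> DeferredLock; never cleaned up here

    @defer.inlineCallbacks
    def run(self, user_id, func, *args, **kwargs):
        lock = self._locks.setdefault(user_id, defer.DeferredLock())
        yield lock.acquire()
        try:
            result = yield defer.maybeDeferred(func, *args, **kwargs)
        finally:
            lock.release()
        defer.returnValue(result)
```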
- Args: + Args: user_id(str): the user whose backup version we're creating version_info(dict): metadata about the new version being created diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index b51faa1204..7ab75070a2 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -202,13 +202,15 @@ class EndToEndRoomKeyStore(SQLBaseStore): "SELECT MAX(version) FROM e2e_room_keys_versions WHERE user_id=?", (user_id,) ) - version = txn.fetchone()[0] + this_version = txn.fetchone()[0] + else: + this_version = version return self._simple_select_one_txn( table="e2e_room_keys_versions", keyvalues={ "user_id": user_id, - "version": version, + "version": this_version, }, retcols=( "user_id", -- cgit 1.5.1 From 5e42c45c96bb62a86604118250e9cb6c57b94254 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 31 Dec 2017 14:10:31 +0000 Subject: switch get_current_version_info back to being get_version_info --- synapse/handlers/e2e_room_keys.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index fdae69c600..f08d80da3e 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -111,7 +111,7 @@ class E2eRoomKeysHandler(object): with (yield self._upload_linearizer.queue(user_id)): # Check that the version we're trying to upload is the current version try: - version_info = yield self.get_current_version_info(user_id) + version_info = yield self.get_version_info(user_id) except StoreError as e: if e.code == 404: raise SynapseError(404, "Version '%s' not found" % (version,)) @@ -222,17 +222,20 @@ class E2eRoomKeysHandler(object): defer.returnValue(new_version) @defer.inlineCallbacks - def get_current_version_info(self, user_id): - """Get the user's current backup version. + def get_version_info(self, user_id, version=None): + """Get the info about a given version of the user's backup Args: user_id(str): the user whose current backup version we're querying + version(str): Optional; if None gives the most recent version + otherwise a historical one. Raises: - StoreError: code 404 if there is no current backup version + StoreError: code 404 if the requested backup version doesn't exist Returns: A deferred of a info dict that gives the info about the new version. { + "version": "1234", "algorithm": "m.megolm_backup.v1", "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K" } -- cgit 1.5.1 From 93d174bcc4cf773bea2534d3683a1e91e5489c89 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 31 Dec 2017 14:10:49 +0000 Subject: improve docstring --- synapse/rest/client/v2_alpha/room_keys.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index d3f857aba2..ca69ced1e3 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -296,13 +296,17 @@ class RoomKeysVersionServlet(RestServlet): def on_GET(self, request, version): """ Retrieve the version information about a given version of the user's - room_keys backup. + room_keys backup. If the version part is missing, returns info about the + most current backup version (if any) It takes out an exclusive lock on this user's room_key backups, to ensure clients only upload to the current backup. + Returns 404 is the given version does not exist. 
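The try/except around the version lookup converts the storage layer's StoreError into the API-level SynapseError with a matching 404 code. Since the same translation recurs through this series, it can be pictured as one small wrapper; this is a sketch of the pattern, not a helper that exists in the codebase:

```python
from twisted.internet import defer

from synapse.api.errors import StoreError, SynapseError

@defer.inlineCallbacks
def get_version_or_404(store, user_id, version=None):
    try:
        info = yield store.get_e2e_room_keys_version_info(user_id, version)
    except StoreError as e:
        if e.code == 404:
            raise SynapseError(404, "Version '%s' not found" % (version,))
        raise  # anything else is unexpected; let it propagate
    defer.returnValue(info)
```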
+ GET /room_keys/version/12345 HTTP/1.1 { + "version": "12345", "algorithm": "m.megolm_backup.v1", "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K" } -- cgit 1.5.1 From b5eee511c73fdd9b5d1a7453433513791192a250 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 31 Dec 2017 14:11:02 +0000 Subject: don't needlessly return user_id --- synapse/storage/e2e_room_keys.py | 1 - 1 file changed, 1 deletion(-) (limited to 'synapse') diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 7ab75070a2..3c720f3b3e 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -213,7 +213,6 @@ class EndToEndRoomKeyStore(SQLBaseStore): "version": this_version, }, retcols=( - "user_id", "version", "algorithm", "auth_data", -- cgit 1.5.1 From 15d513f16fd272fb3763a42f05a8f296d4e1abf0 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 31 Dec 2017 14:35:25 +0000 Subject: fix idiocies and so make tests pass --- synapse/storage/e2e_room_keys.py | 5 +++-- synapse/storage/schema/delta/46/e2e_room_keys.sql | 2 +- tests/handlers/test_e2e_room_keys.py | 19 +++++++++++-------- 3 files changed, 15 insertions(+), 11 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 3c720f3b3e..e4d56b7c37 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -207,6 +207,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): this_version = version return self._simple_select_one_txn( + txn, table="e2e_room_keys_versions", keyvalues={ "user_id": user_id, @@ -243,9 +244,9 @@ class EndToEndRoomKeyStore(SQLBaseStore): ) current_version = txn.fetchone()[0] if current_version is None: - current_version = 0 + current_version = '0' - new_version = current_version + 1 + new_version = str(int(current_version) + 1) self._simple_insert_txn( txn, diff --git a/synapse/storage/schema/delta/46/e2e_room_keys.sql b/synapse/storage/schema/delta/46/e2e_room_keys.sql index 16499ac34c..4531fd56ee 100644 --- a/synapse/storage/schema/delta/46/e2e_room_keys.sql +++ b/synapse/storage/schema/delta/46/e2e_room_keys.sql @@ -35,4 +35,4 @@ CREATE TABLE e2e_room_keys_versions ( auth_data TEXT NOT NULL ); -CREATE UNIQUE INDEX e2e_room_keys_versions_user_idx ON e2e_room_keys_versions(user_id); +CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions(user_id, version); diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index 9d3bef6db2..6f4b3a147a 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -37,7 +37,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): handlers=None, replication_layer=mock.Mock(), ) - self.handler = synapse.handlers.e2e_keys.E2eRoomKeysHandler(self.hs) + self.handler = synapse.handlers.e2e_room_keys.E2eRoomKeysHandler(self.hs) @defer.inlineCallbacks @@ -46,6 +46,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): if there is no version. """ local_user = "@boris:" + self.hs.hostname + res = None try: res = yield self.handler.get_version_info(local_user); except errors.SynapseError as e: @@ -58,6 +59,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): if it doesn't exist. 
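Backup versions are stored and returned as strings but incremented numerically, starting from '0' when no backup exists yet. A tiny illustration of the scheme, with one hedge: if the version column is compared as text, SQL MAX() orders lexically ('9' sorts after '10'), which is harmless while versions stay single-digit as in these tests, but would argue for a numeric column or a cast beyond that:

```python
def next_version(current_version):
    # current_version is the MAX(version) result, or None when no backups exist
    if current_version is None:
        current_version = '0'
    return str(int(current_version) + 1)

assert next_version(None) == '1'
assert next_version('1') == '2'
assert next_version('9') == '10'  # note: lexically '10' sorts *before* '9'
```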
""" local_user = "@boris:" + self.hs.hostname + res = None try: res = yield self.handler.get_version_info(local_user, "mrflibble"); except errors.SynapseError as e: @@ -69,7 +71,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): """Check that we can create and then retrieve versions. """ local_user = "@boris:" + self.hs.hostname - res = yield self.handler.create_version(user_id, { + res = yield self.handler.create_version(local_user, { "algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data", }); @@ -92,7 +94,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): }); # upload a new one... - res = yield self.handler.create_version(user_id, { + res = yield self.handler.create_version(local_user, { "algorithm": "m.megolm_backup.v1", "auth_data": "second_version_auth_data", }); @@ -111,7 +113,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): """Check that we can create and then delete versions. """ local_user = "@boris:" + self.hs.hostname - res = yield self.handler.create_version(user_id, { + res = yield self.handler.create_version(local_user, { "algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data", }); @@ -121,21 +123,22 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): yield self.handler.delete_version(local_user, "1"); # check that it's gone + res = None try: res = yield self.handler.get_version_info(local_user, "1"); except errors.SynapseError as e: self.assertEqual(e.code, 404); - self.assertEqual(res, None); + self.assertEqual(res, None); @defer.inlineCallbacks def test_get_room_keys(self): - pass + yield None @defer.inlineCallbacks def test_upload_room_keys(self): - pass + yield None @defer.inlineCallbacks def test_delete_room_keys(self): - pass + yield None -- cgit 1.5.1 From fe87890b18f57f0268bd65aeca881e7817bbe9e4 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 31 Dec 2017 17:47:11 +0000 Subject: implement remaining tests and make them work --- synapse/handlers/e2e_room_keys.py | 35 +++- synapse/rest/client/v2_alpha/room_keys.py | 6 + synapse/storage/e2e_room_keys.py | 3 +- tests/handlers/test_e2e_room_keys.py | 276 +++++++++++++++++++++++++++--- 4 files changed, 287 insertions(+), 33 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index f08d80da3e..09c2888db6 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -58,6 +58,10 @@ class E2eRoomKeysHandler(object): results = yield self.store.get_e2e_room_keys( user_id, version, room_id, session_id ) + + if results['rooms'] == {}: + raise SynapseError(404, "No room_keys found") + defer.returnValue(results) @defer.inlineCallbacks @@ -109,9 +113,10 @@ class E2eRoomKeysHandler(object): # XXX: perhaps we should use a finer grained lock here? 
with (yield self._upload_linearizer.queue(user_id)): + # Check that the version we're trying to upload is the current version try: - version_info = yield self.get_version_info(user_id) + version_info = yield self._get_version_info_unlocked(user_id) except StoreError as e: if e.code == 404: raise SynapseError(404, "Version '%s' not found" % (version,)) @@ -119,16 +124,23 @@ class E2eRoomKeysHandler(object): raise e if version_info['version'] != version: - raise RoomKeysVersionError(current_version=version_info.version) + # Check that the version we're trying to upload actually exists + try: + version_info = yield self._get_version_info_unlocked(user_id, version) + # if we get this far, the version must exist + raise RoomKeysVersionError(current_version=version_info['version']) + except StoreError as e: + if e.code == 404: + raise SynapseError(404, "Version '%s' not found" % (version,)) + else: + raise e # go through the room_keys. # XXX: this should/could be done concurrently, given we're in a lock. for room_id, room in room_keys['rooms'].iteritems(): for session_id, session in room['sessions'].iteritems(): - room_key = session[session_id] - yield self._upload_room_key( - user_id, version, room_id, session_id, room_key + user_id, version, room_id, session_id, session ) @defer.inlineCallbacks @@ -242,8 +254,17 @@ class E2eRoomKeysHandler(object): """ with (yield self._upload_linearizer.queue(user_id)): - results = yield self.store.get_e2e_room_keys_version_info(user_id) - defer.returnValue(results) + res = yield self._get_version_info_unlocked(user_id, version) + defer.returnValue(res) + + @defer.inlineCallbacks + def _get_version_info_unlocked(self, user_id, version=None): + """Get the info about a given version of the user's backup + without obtaining the upload_linearizer lock. 
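The version check above separates two failure modes: a version that never existed answers 404, while a version that exists but is no longer current raises RoomKeysVersionError, telling the client what the current version is. The decision distils to a pure function; the exception classes here are stand-ins for SynapseError(404) and RoomKeysVersionError:

```python
class VersionNotFound(Exception):
    """Stand-in for SynapseError(404)."""

class StaleVersion(Exception):
    """Stand-in for RoomKeysVersionError: carries the current version."""
    def __init__(self, current_version):
        super(StaleVersion, self).__init__(current_version)
        self.current_version = current_version

def check_upload_version(requested, known_versions):
    """known_versions: list of existing version strings, oldest first."""
    if not known_versions:
        raise VersionNotFound()          # no backups at all
    current = known_versions[-1]
    if requested == current:
        return current                   # safe to upload
    if requested in known_versions:
        raise StaleVersion(current)      # real but superseded version
    raise VersionNotFound()              # never existed
```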
For params see get_version_info + """ + + results = yield self.store.get_e2e_room_keys_version_info(user_id, version) + defer.returnValue(results) @defer.inlineCallbacks def delete_version(self, user_id, version): diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index ca69ced1e3..8f10e4e1cd 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -204,6 +204,12 @@ class RoomKeysServlet(RestServlet): room_keys = yield self.e2e_room_keys_handler.get_room_keys( user_id, version, room_id, session_id ) + + if session_id: + room_keys = room_keys['rooms'][room_id]['sessions'][session_id] + elif room_id: + room_keys = room_keys['rooms'][room_id] + defer.returnValue((200, room_keys)) @defer.inlineCallbacks diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index e4d56b7c37..8e8e4e457c 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -58,6 +58,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): defer.returnValue(row) + @defer.inlineCallbacks def set_e2e_room_key(self, user_id, version, room_id, session_id, room_key): """Replaces or inserts the encrypted E2E room key for a given session in a given backup @@ -135,7 +136,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): desc="get_e2e_room_keys", ) - sessions = {} + sessions = { 'rooms': {} } for row in rows: room_entry = sessions['rooms'].setdefault(row['room_id'], {"sessions": {}}) room_entry['sessions'][row['session_id']] = { diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index afe6ecf27b..3cbfd6f9d0 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -17,6 +17,7 @@ import mock from synapse.api import errors from twisted.internet import defer +import copy import synapse.api.errors import synapse.handlers.e2e_room_keys @@ -25,6 +26,22 @@ import synapse.storage from tests import unittest, utils +# sample room_key data for use in the tests +room_keys = { + "rooms": { + "!abc:matrix.org": { + "sessions": { + "c0ff33": { + "first_message_index": 1, + "forwarded_count": 1, + "is_verified": False, + "session_data": "SSBBTSBBIEZJU0gK" + } + } + } + } +} + class E2eRoomKeysHandlerTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): super(E2eRoomKeysHandlerTestCase, self).__init__(*args, **kwargs) @@ -38,46 +55,44 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): replication_layer=mock.Mock(), ) self.handler = synapse.handlers.e2e_room_keys.E2eRoomKeysHandler(self.hs) + self.local_user = "@boris:" + self.hs.hostname; @defer.inlineCallbacks def test_get_missing_current_version_info(self): """Check that we get a 404 if we ask for info about the current version if there is no version. """ - local_user = "@boris:" + self.hs.hostname res = None try: - res = yield self.handler.get_version_info(local_user) + yield self.handler.get_version_info(self.local_user) except errors.SynapseError as e: - self.assertEqual(e.code, 404) - self.assertEqual(res, None) + res = e.code + self.assertEqual(res, 404) @defer.inlineCallbacks def test_get_missing_version_info(self): """Check that we get a 404 if we ask for info about a specific version if it doesn't exist. 
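RoomKeysServlet.on_GET narrows the handler's bulk {"rooms": ...} response down to the requested room or session before replying, as the hunk above shows. Here is that unwrapping as a standalone function; the KeyError branch is illustrative, since the handler already raises 404 when nothing matched:

```python
def narrow_room_keys(room_keys, room_id=None, session_id=None):
    """Unwrap a bulk room_keys response to the scope the URL asked for."""
    try:
        if session_id:
            return room_keys["rooms"][room_id]["sessions"][session_id]
        if room_id:
            return room_keys["rooms"][room_id]
        return room_keys
    except KeyError:
        raise LookupError("no room_keys at the requested scope")
```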
""" - local_user = "@boris:" + self.hs.hostname res = None try: - res = yield self.handler.get_version_info(local_user, "mrflibble") + yield self.handler.get_version_info(self.local_user, "bogus_version") except errors.SynapseError as e: - self.assertEqual(e.code, 404) - self.assertEqual(res, None) + res = e.code + self.assertEqual(res, 404) @defer.inlineCallbacks def test_create_version(self): """Check that we can create and then retrieve versions. """ - local_user = "@boris:" + self.hs.hostname - res = yield self.handler.create_version(local_user, { + res = yield self.handler.create_version(self.local_user, { "algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data", }) self.assertEqual(res, "1") # check we can retrieve it as the current version - res = yield self.handler.get_version_info(local_user) + res = yield self.handler.get_version_info(self.local_user) self.assertDictEqual(res, { "version": "1", "algorithm": "m.megolm_backup.v1", @@ -85,7 +100,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): }) # check we can retrieve it as a specific version - res = yield self.handler.get_version_info(local_user, "1") + res = yield self.handler.get_version_info(self.local_user, "1") self.assertDictEqual(res, { "version": "1", "algorithm": "m.megolm_backup.v1", @@ -93,14 +108,14 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): }) # upload a new one... - res = yield self.handler.create_version(local_user, { + res = yield self.handler.create_version(self.local_user, { "algorithm": "m.megolm_backup.v1", "auth_data": "second_version_auth_data", }) self.assertEqual(res, "2") # check we can retrieve it as the current version - res = yield self.handler.get_version_info(local_user) + res = yield self.handler.get_version_info(self.local_user) self.assertDictEqual(res, { "version": "2", "algorithm": "m.megolm_backup.v1", @@ -111,32 +126,243 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): def test_delete_version(self): """Check that we can create and then delete versions. 
""" - local_user = "@boris:" + self.hs.hostname - res = yield self.handler.create_version(local_user, { + res = yield self.handler.create_version(self.local_user, { "algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data", }) self.assertEqual(res, "1") # check we can delete it - yield self.handler.delete_version(local_user, "1") + yield self.handler.delete_version(self.local_user, "1") # check that it's gone res = None try: - res = yield self.handler.get_version_info(local_user, "1") + yield self.handler.get_version_info(self.local_user, "1") except errors.SynapseError as e: - self.assertEqual(e.code, 404) - self.assertEqual(res, None) + res = e.code + self.assertEqual(res, 404) @defer.inlineCallbacks - def test_get_room_keys(self): - yield None + def test_get_missing_room_keys(self): + """Check that we get a 404 on querying missing room_keys + """ + res = None + try: + yield self.handler.get_room_keys(self.local_user, "bogus_version") + except errors.SynapseError as e: + res = e.code + self.assertEqual(res, 404) + + # check we also get a 404 even if the version is valid + version = yield self.handler.create_version(self.local_user, { + "algorithm": "m.megolm_backup.v1", + "auth_data": "first_version_auth_data", + }) + self.assertEqual(version, "1") + + res = None + try: + yield self.handler.get_room_keys(self.local_user, version) + except errors.SynapseError as e: + res = e.code + self.assertEqual(res, 404) + + # TODO: test the locking semantics when uploading room_keys, + # although this is probably best done in sytest @defer.inlineCallbacks - def test_upload_room_keys(self): - yield None + def test_upload_room_keys_no_versions(self): + """Check that we get a 404 on uploading keys when no versions are defined + """ + res = None + try: + yield self.handler.upload_room_keys(self.local_user, "no_version", room_keys) + except errors.SynapseError as e: + res = e.code + self.assertEqual(res, 404) + + @defer.inlineCallbacks + def test_upload_room_keys_bogus_version(self): + """Check that we get a 404 on uploading keys when an nonexistent version is specified + """ + version = yield self.handler.create_version(self.local_user, { + "algorithm": "m.megolm_backup.v1", + "auth_data": "first_version_auth_data", + }) + self.assertEqual(version, "1") + + res = None + try: + yield self.handler.upload_room_keys(self.local_user, "bogus_version", room_keys) + except errors.SynapseError as e: + res = e.code + self.assertEqual(res, 404) + + @defer.inlineCallbacks + def test_upload_room_keys_wrong_version(self): + """Check that we get a 403 on uploading keys for an old version + """ + version = yield self.handler.create_version(self.local_user, { + "algorithm": "m.megolm_backup.v1", + "auth_data": "first_version_auth_data", + }) + self.assertEqual(version, "1") + + version = yield self.handler.create_version(self.local_user, { + "algorithm": "m.megolm_backup.v1", + "auth_data": "second_version_auth_data", + }) + self.assertEqual(version, "2") + + res = None + try: + yield self.handler.upload_room_keys(self.local_user, "1", room_keys) + except errors.SynapseError as e: + res = e.code + self.assertEqual(res, 403) + + @defer.inlineCallbacks + def test_upload_room_keys_insert(self): + """Check that we can insert and retrieve keys for a session + """ + version = yield self.handler.create_version(self.local_user, { + "algorithm": "m.megolm_backup.v1", + "auth_data": "first_version_auth_data", + }) + self.assertEqual(version, "1") + + yield self.handler.upload_room_keys(self.local_user, version, 
room_keys) + + res = yield self.handler.get_room_keys(self.local_user, version) + self.assertDictEqual(res, room_keys) + + # check getting room_keys for a given room + res = yield self.handler.get_room_keys( + self.local_user, + version, + room_id="!abc:matrix.org" + ) + self.assertDictEqual(res, room_keys) + + # check getting room_keys for a given session_id + res = yield self.handler.get_room_keys( + self.local_user, + version, + room_id="!abc:matrix.org", + session_id="c0ff33", + ) + self.assertDictEqual(res, room_keys) + + @defer.inlineCallbacks + def test_upload_room_keys_merge(self): + """Check that we can upload a new room_key for an existing session and + have it correctly merged""" + version = yield self.handler.create_version(self.local_user, { + "algorithm": "m.megolm_backup.v1", + "auth_data": "first_version_auth_data", + }) + self.assertEqual(version, "1") + + yield self.handler.upload_room_keys(self.local_user, version, room_keys) + + new_room_keys = copy.deepcopy(room_keys) + + # test that increasing the message_index doesn't replace the existing session + new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']['first_message_index'] = 2 + new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'] = 'new' + yield self.handler.upload_room_keys(self.local_user, version, new_room_keys) + + res = yield self.handler.get_room_keys(self.local_user, version) + self.assertEqual( + res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'], + "SSBBTSBBIEZJU0gK" + ) + + # test that marking the session as verified however /does/ replace it + new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']['is_verified'] = True + yield self.handler.upload_room_keys(self.local_user, version, new_room_keys) + + res = yield self.handler.get_room_keys(self.local_user, version) + self.assertEqual( + res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'], + "new" + ) + + # test that a session with a higher forwarded_count doesn't replace one + # with a lower forwarding count + new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']['forwarded_count'] = 2 + new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'] = 'other' + yield self.handler.upload_room_keys(self.local_user, version, new_room_keys) + + res = yield self.handler.get_room_keys(self.local_user, version) + self.assertEqual( + res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'], + "new" + ) + + # TODO: check edge cases as well as the common variations here @defer.inlineCallbacks def test_delete_room_keys(self): - yield None + """Check that we can insert and delete keys for a session + """ + version = yield self.handler.create_version(self.local_user, { + "algorithm": "m.megolm_backup.v1", + "auth_data": "first_version_auth_data", + }) + self.assertEqual(version, "1") + + # check for bulk-delete + yield self.handler.upload_room_keys(self.local_user, version, room_keys) + yield self.handler.delete_room_keys(self.local_user, version) + res = None + try: + yield self.handler.get_room_keys( + self.local_user, + version, + room_id="!abc:matrix.org", + session_id="c0ff33", + ) + except errors.SynapseError as e: + res = e.code + self.assertEqual(res, 404) + + # check for bulk-delete per room + yield self.handler.upload_room_keys(self.local_user, version, room_keys) + yield self.handler.delete_room_keys( + self.local_user, + version, + room_id="!abc:matrix.org", + ) + res = None + try: + yield self.handler.get_room_keys( + self.local_user, 
+ version, + room_id="!abc:matrix.org", + session_id="c0ff33", + ) + except errors.SynapseError as e: + res = e.code + self.assertEqual(res, 404) + + # check for bulk-delete per session + yield self.handler.upload_room_keys(self.local_user, version, room_keys) + yield self.handler.delete_room_keys( + self.local_user, + version, + room_id="!abc:matrix.org", + session_id="c0ff33", + ) + res = None + try: + yield self.handler.get_room_keys( + self.local_user, + version, + room_id="!abc:matrix.org", + session_id="c0ff33", + ) + except errors.SynapseError as e: + res = e.code + self.assertEqual(res, 404) -- cgit 1.5.1 From edc427a35187e462458d188039e761ffdb7e486d Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 31 Dec 2017 17:50:55 +0000 Subject: flake8 --- synapse/storage/e2e_room_keys.py | 2 +- tests/handlers/test_e2e_room_keys.py | 21 +++++++++++++-------- 2 files changed, 14 insertions(+), 9 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 8e8e4e457c..c2f226396d 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -136,7 +136,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): desc="get_e2e_room_keys", ) - sessions = { 'rooms': {} } + sessions = {'rooms': {}} for row in rows: room_entry = sessions['rooms'].setdefault(row['room_id'], {"sessions": {}}) room_entry['sessions'][row['session_id']] = { diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index 3cbfd6f9d0..6e43543ed9 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -42,6 +42,7 @@ room_keys = { } } + class E2eRoomKeysHandlerTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): super(E2eRoomKeysHandlerTestCase, self).__init__(*args, **kwargs) @@ -55,7 +56,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): replication_layer=mock.Mock(), ) self.handler = synapse.handlers.e2e_room_keys.E2eRoomKeysHandler(self.hs) - self.local_user = "@boris:" + self.hs.hostname; + self.local_user = "@boris:" + self.hs.hostname @defer.inlineCallbacks def test_get_missing_current_version_info(self): @@ -184,7 +185,8 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): @defer.inlineCallbacks def test_upload_room_keys_bogus_version(self): - """Check that we get a 404 on uploading keys when an nonexistent version is specified + """Check that we get a 404 on uploading keys when an nonexistent version + is specified """ version = yield self.handler.create_version(self.local_user, { "algorithm": "m.megolm_backup.v1", @@ -194,7 +196,9 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): res = None try: - yield self.handler.upload_room_keys(self.local_user, "bogus_version", room_keys) + yield self.handler.upload_room_keys( + self.local_user, "bogus_version", room_keys + ) except errors.SynapseError as e: res = e.code self.assertEqual(res, 404) @@ -267,10 +271,11 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): yield self.handler.upload_room_keys(self.local_user, version, room_keys) new_room_keys = copy.deepcopy(room_keys) + new_room_key = new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33'] # test that increasing the message_index doesn't replace the existing session - new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']['first_message_index'] = 2 - new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'] = 'new' + new_room_key['first_message_index'] = 2 + new_room_key['session_data'] = 'new' 
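The merge test surrounding this point (its remaining assertions follow below) encodes a replacement rule: an uploaded key replaces the stored one only if it is strictly better, where verified beats unverified, then a lower first_message_index wins, then a lower forwarded_count. A pure-function reading of that rule, distilled from the test fixtures rather than copied from the handler:

```python
def should_replace(current_key, new_key):
    # a verified key always beats an unverified one, and never vice versa
    if new_key["is_verified"] != current_key["is_verified"]:
        return new_key["is_verified"]
    # prefer the key that can decrypt from earlier in the session
    if new_key["first_message_index"] != current_key["first_message_index"]:
        return new_key["first_message_index"] < current_key["first_message_index"]
    # prefer the less-forwarded, and so more trustworthy, copy
    return new_key["forwarded_count"] < current_key["forwarded_count"]
```

Checked against the three scenarios in this test: a higher first_message_index does not replace, newly verified does, and a higher forwarded_count does not.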
yield self.handler.upload_room_keys(self.local_user, version, new_room_keys) res = yield self.handler.get_room_keys(self.local_user, version) @@ -280,7 +285,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): ) # test that marking the session as verified however /does/ replace it - new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']['is_verified'] = True + new_room_key['is_verified'] = True yield self.handler.upload_room_keys(self.local_user, version, new_room_keys) res = yield self.handler.get_room_keys(self.local_user, version) @@ -291,8 +296,8 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): # test that a session with a higher forwarded_count doesn't replace one # with a lower forwarding count - new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']['forwarded_count'] = 2 - new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'] = 'other' + new_room_key['forwarded_count'] = 2 + new_room_key['session_data'] = 'other' yield self.handler.upload_room_keys(self.local_user, version, new_room_keys) res = yield self.handler.get_room_keys(self.local_user, version) -- cgit 1.5.1 From 72788cf9c1f018a5346a8f9204c6bfe058289fd2 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 7 Jan 2018 23:45:26 +0000 Subject: support DELETE /version with no args --- synapse/handlers/e2e_room_keys.py | 2 +- synapse/rest/client/v2_alpha/room_keys.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 09c2888db6..a43fc7fc7e 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -267,7 +267,7 @@ class E2eRoomKeysHandler(object): defer.returnValue(results) @defer.inlineCallbacks - def delete_version(self, user_id, version): + def delete_version(self, user_id, version=None): """Deletes a given version of the user's e2e_room_keys backup Args: diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 8f10e4e1cd..63b1f62f90 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -308,7 +308,7 @@ class RoomKeysVersionServlet(RestServlet): It takes out an exclusive lock on this user's room_key backups, to ensure clients only upload to the current backup. - Returns 404 is the given version does not exist. + Returns 404 if the given version does not exist. GET /room_keys/version/12345 HTTP/1.1 { @@ -330,7 +330,8 @@ class RoomKeysVersionServlet(RestServlet): def on_DELETE(self, request, version): """ Delete the information about a given version of the user's - room_keys backup. Doesn't delete the actual room data. + room_keys backup. If the version part is missing, deletes the most + current backup version (if any). Doesn't delete the actual room data. 
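Illustrative client calls for the two DELETE forms this servlet accepts, sketched with the `requests` library; the homeserver URL, access token and the unstable client-API prefix are placeholders rather than confirmed routes:

```python
import requests

HS = "https://example.com"
HEADERS = {"Authorization": "Bearer <access_token>"}

# delete one specific backup version
requests.delete(HS + "/_matrix/client/unstable/room_keys/version/12345",
                headers=HEADERS)

# delete whichever backup version is currently the most recent
requests.delete(HS + "/_matrix/client/unstable/room_keys/version",
                headers=HEADERS)
```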
DELETE /room_keys/version/12345 HTTP/1.1 HTTP/1.1 200 OK -- cgit 1.5.1 From 66a4ca1d28c2c2e22a0343e6db0f5a2bce9ec987 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 7 Jan 2018 23:45:55 +0000 Subject: 404 nicely if you try to interact with a missing current version --- synapse/storage/e2e_room_keys.py | 51 +++++++++++++++++++++++++----------- tests/handlers/test_e2e_room_keys.py | 22 ++++++++++++++++ 2 files changed, 57 insertions(+), 16 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index c2f226396d..d82f223e86 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -184,6 +184,17 @@ class EndToEndRoomKeyStore(SQLBaseStore): desc="delete_e2e_room_keys", ) + @staticmethod + def _get_current_version(txn, user_id): + txn.execute( + "SELECT MAX(version) FROM e2e_room_keys_versions WHERE user_id=?", + (user_id,) + ) + row = txn.fetchone() + if not row: + raise StoreError(404, 'No current backup version') + return row[0] + def get_e2e_room_keys_version_info(self, user_id, version=None): """Get info metadata about a version of our room_keys backup. @@ -199,11 +210,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): def _get_e2e_room_keys_version_info_txn(txn): if version is None: - txn.execute( - "SELECT MAX(version) FROM e2e_room_keys_versions WHERE user_id=?", - (user_id,) - ) - this_version = txn.fetchone()[0] + this_version = self._get_current_version(txn, user_id) else: this_version = version @@ -266,23 +273,35 @@ class EndToEndRoomKeyStore(SQLBaseStore): "create_e2e_room_keys_version_txn", _create_e2e_room_keys_version_txn ) - @defer.inlineCallbacks - def delete_e2e_room_keys_version(self, user_id, version): + def delete_e2e_room_keys_version(self, user_id, version=None): """Delete a given backup version of the user's room keys. Doesn't delete their actual key data. Args: user_id(str): the user whose backup version we're deleting - version(str): the ID of the backup version we're deleting + version(str): Optional. the version ID of the backup version we're deleting + If missing, we delete the current backup version info. + Raises: + StoreError: with code 404 if there are no e2e_room_keys_versions present, + or if the version requested doesn't exist. 
""" - keyvalues = { - "user_id": user_id, - "version": version, - } + def _delete_e2e_room_keys_version_txn(txn): + if version is None: + this_version = self._get_current_version(txn, user_id) + else: + this_version = version - yield self._simple_delete( - table="e2e_room_keys_versions", - keyvalues=keyvalues, - desc="delete_e2e_room_keys_version", + return self._simple_delete_one_txn( + txn, + table="e2e_room_keys_versions", + keyvalues={ + "user_id": user_id, + "version": this_version, + }, + ) + + return self.runInteraction( + "delete_e2e_room_keys_version", + _delete_e2e_room_keys_version_txn ) diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index 6e43543ed9..8bfffb5c0e 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -123,6 +123,28 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): "auth_data": "second_version_auth_data", }) + @defer.inlineCallbacks + def test_delete_missing_version(self): + """Check that we get a 404 on deleting nonexistent versions + """ + res = None + try: + yield self.handler.delete_version(self.local_user, "1") + except errors.SynapseError as e: + res = e.code + self.assertEqual(res, 404) + + @defer.inlineCallbacks + def test_delete_missing_current_version(self): + """Check that we get a 404 on deleting nonexistent current version + """ + res = None + try: + yield self.handler.delete_version(self.local_user) + except errors.SynapseError as e: + res = e.code + self.assertEqual(res, 404) + @defer.inlineCallbacks def test_delete_version(self): """Check that we can create and then delete versions. -- cgit 1.5.1 From 54ac18e8327e689b9f91b93ad8caac2f6b3dd29c Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 7 Jan 2018 23:48:22 +0000 Subject: use parse_string --- synapse/rest/client/v2_alpha/room_keys.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 63b1f62f90..afb8a36dd0 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -123,7 +123,7 @@ class RoomKeysServlet(RestServlet): requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() body = parse_json_object_from_request(request) - version = request.args.get("version")[0] + version = parse_string(request, "version") if session_id: body = { @@ -199,7 +199,7 @@ class RoomKeysServlet(RestServlet): """ requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() - version = request.args.get("version")[0] + version = parse_string(request, "version") room_keys = yield self.e2e_room_keys_handler.get_room_keys( user_id, version, room_id, session_id @@ -229,7 +229,7 @@ class RoomKeysServlet(RestServlet): requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() - version = request.args.get("version")[0] + version = parse_string(request, "version") yield self.e2e_room_keys_handler.delete_room_keys( user_id, version, room_id, session_id -- cgit 1.5.1 From f0cede5556a54ece6cd5289856bb230f8009c4fa Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 7 Jan 2018 23:58:32 +0000 Subject: missing import --- synapse/storage/e2e_room_keys.py | 1 + 1 file changed, 1 insertion(+) (limited to 'synapse') diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 
d82f223e86..089989fcfa 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -14,6 +14,7 @@ # limitations under the License. from twisted.internet import defer +from synapse.api.errors import StoreError from ._base import SQLBaseStore -- cgit 1.5.1 From 4f7064f6b5a1822067bfbf358317bc1dbb51b9c6 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 7 Jan 2018 23:59:07 +0000 Subject: missing import --- synapse/rest/client/v2_alpha/room_keys.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index afb8a36dd0..9f0172e6f5 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -19,7 +19,7 @@ from twisted.internet import defer from synapse.api.errors import SynapseError from synapse.http.servlet import ( - RestServlet, parse_json_object_from_request + RestServlet, parse_json_object_from_request, parse_string ) from ._base import client_v2_patterns -- cgit 1.5.1 From 63417c31e9d32992fbb422b630024946494ff26b Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 13 Aug 2018 22:36:52 +0100 Subject: fix typo --- synapse/server_notices/resource_limits_server_notices.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 017828f81e..e4be836439 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -34,7 +34,7 @@ class ResourceLimitsServerNotices(object): self._store = hs.get_datastore() self.auth = hs.get_auth() self._server_notice_content = hs.config.user_consent_server_notice_content - self._limit_usage_by_mau = hs.config.limit_usage_by_mau = False + self._limit_usage_by_mau = hs.config.limit_usage_by_mau self._hs_disabled = hs.config.hs_disabled self._notified_of_blocking = set() -- cgit 1.5.1 From 9b75c78b4df276fe40d3f1b92a57481c3e970886 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Tue, 14 Aug 2018 11:20:41 +0100 Subject: support server notice state events for resource limits --- synapse/api/constants.py | 2 ++ synapse/server_notices/consent_server_notices.py | 3 ++- .../resource_limits_server_notices.py | 21 ++++++++++++++++----- synapse/server_notices/server_notices_manager.py | 4 ++-- 4 files changed, 22 insertions(+), 8 deletions(-) (limited to 'synapse') diff --git a/synapse/api/constants.py b/synapse/api/constants.py index b0da506f6d..ef60841995 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -79,6 +79,8 @@ class EventTypes(object): ServerACL = "m.room.server_acl" + ServerNoticeLimitReached = "m.server_notice.usage_limit_reached" + class RejectedReason(object): AUTH_ERROR = "auth_error" diff --git a/synapse/server_notices/consent_server_notices.py b/synapse/server_notices/consent_server_notices.py index 5e3044d164..6d5caedb08 100644 --- a/synapse/server_notices/consent_server_notices.py +++ b/synapse/server_notices/consent_server_notices.py @@ -22,6 +22,7 @@ from synapse.api.errors import SynapseError from synapse.api.urls import ConsentURIBuilder from synapse.config import ConfigError from synapse.types import get_localpart_from_id +from synapse.api.constants import EventTypes logger = logging.getLogger(__name__) @@ -103,7 +104,7 @@ class ConsentServerNotices(object): }, ) yield 
self._server_notices_manager.send_notice( - user_id, content, + user_id, content, EventTypes.Message ) yield self._store.user_set_consent_server_notice_sent( user_id, self._current_consent_version, diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index e4be836439..2f49dae168 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -17,6 +17,7 @@ import logging from twisted.internet import defer from synapse.api.errors import AuthError, SynapseError +from synapse.api.constants import EventTypes logger = logging.getLogger(__name__) @@ -61,22 +62,32 @@ class ResourceLimitsServerNotices(object): # In practice, not sure we can ever get here return try: + # Normally should always pass in user_id if you have it, but in + # this case are checking what would happen to other users if they + # were to arrive. yield self.auth.check_auth_blocking() self._resouce_limited = False # Need to start removing notices if user_id in self._notified_of_blocking: # Send message to remove warning - needs updating - content = "remove warning" + content = { + 'body': '', + 'admin_email': '', + } self._send_server_notice(user_id, content) self._notified_of_blocking.remove(user_id) - except AuthError: + except AuthError as e: # Need to start notifying of blocking self._resouce_limited = True if user_id not in self._notified_of_blocking: - # Send message to add warning - needs updating - content = "add warning" + # TODO use admin email contained in error once PR lands + content = { + 'body': e.msg, + 'admin_email': 'stunt@adminemail.com', + 'msgtype': 'm.text' + } self._send_server_notice(user_id, content) self._notified_of_blocking.add(user_id) @@ -93,7 +104,7 @@ class ResourceLimitsServerNotices(object): """ try: yield self._server_notices_manager.send_notice( - user_id, content, + user_id, content, EventTypes.ServerNoticeLimitReached ) except SynapseError as e: logger.error("Error sending server notice about resource limits: %s", e) diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index a26deace53..4806977a89 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -46,7 +46,7 @@ class ServerNoticesManager(object): return self._config.server_notices_mxid is not None @defer.inlineCallbacks - def send_notice(self, user_id, event_content): + def send_notice(self, user_id, event_content, type): """Send a notice to the given user Creates the server notices room, if none exists. 
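At this stage the handler tracks who has been warned with an in-memory set, adding users when blocking starts and removing them when it stops; later commits in this series move to checking room state instead (plausibly because an in-memory set does not survive restarts). The bookkeeping, reduced to a sketch:

```python
class BlockingNotifier(object):
    """Warn each user once on becoming blocked, withdraw once on unblock."""

    def __init__(self, send_notice):
        self._send_notice = send_notice  # callable(user_id, content)
        self._notified = set()           # in-memory only: the known weakness

    def on_check(self, user_id, blocked, message=""):
        if blocked and user_id not in self._notified:
            self._send_notice(user_id, {"msgtype": "m.text", "body": message})
            self._notified.add(user_id)
        elif not blocked and user_id in self._notified:
            self._send_notice(user_id, {"msgtype": "m.text", "body": ""})
            self._notified.remove(user_id)
```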
@@ -67,7 +67,7 @@ class ServerNoticesManager(object): yield self._event_creation_handler.create_and_send_nonmember_event( requester, { - "type": EventTypes.Message, + "type": type, "room_id": room_id, "sender": system_mxid, "content": event_content, -- cgit 1.5.1 From e2c9fe0a6a6dbe4f81402b79b6605e17fb05e5d4 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Tue, 14 Aug 2018 13:32:56 +0100 Subject: backout ability to pass in event type to server notices --- synapse/server_notices/consent_server_notices.py | 2 +- synapse/server_notices/resource_limits_server_notices.py | 3 ++- synapse/server_notices/server_notices_manager.py | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) (limited to 'synapse') diff --git a/synapse/server_notices/consent_server_notices.py b/synapse/server_notices/consent_server_notices.py index 6d5caedb08..783ceae1ad 100644 --- a/synapse/server_notices/consent_server_notices.py +++ b/synapse/server_notices/consent_server_notices.py @@ -104,7 +104,7 @@ class ConsentServerNotices(object): }, ) yield self._server_notices_manager.send_notice( - user_id, content, EventTypes.Message + user_id, content ) yield self._store.user_set_consent_server_notice_sent( user_id, self._current_consent_version, diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 2f49dae168..0470f66205 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -91,6 +91,7 @@ class ResourceLimitsServerNotices(object): self._send_server_notice(user_id, content) self._notified_of_blocking.add(user_id) + @defer.inlineCallbacks def _send_server_notice(self, user_id, content): """Sends Server notice @@ -104,7 +105,7 @@ class ResourceLimitsServerNotices(object): """ try: yield self._server_notices_manager.send_notice( - user_id, content, EventTypes.ServerNoticeLimitReached + user_id, content ) except SynapseError as e: logger.error("Error sending server notice about resource limits: %s", e) diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index 4806977a89..a26deace53 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -46,7 +46,7 @@ class ServerNoticesManager(object): return self._config.server_notices_mxid is not None @defer.inlineCallbacks - def send_notice(self, user_id, event_content, type): + def send_notice(self, user_id, event_content): """Send a notice to the given user Creates the server notices room, if none exists. 
@@ -67,7 +67,7 @@ class ServerNoticesManager(object): yield self._event_creation_handler.create_and_send_nonmember_event( requester, { - "type": type, + "type": EventTypes.Message, "room_id": room_id, "sender": system_mxid, "content": event_content, -- cgit 1.5.1 From c24fc9797bc0dd59c0d8131ea09c1178e39b76f6 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 15 Aug 2018 15:04:30 +0100 Subject: add new event types --- synapse/api/constants.py | 1 + 1 file changed, 1 insertion(+) (limited to 'synapse') diff --git a/synapse/api/constants.py b/synapse/api/constants.py index ef60841995..1fb24578e2 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -78,6 +78,7 @@ class EventTypes(object): Name = "m.room.name" ServerACL = "m.room.server_acl" + Pinned = "m.room.pinned_events" ServerNoticeLimitReached = "m.server_notice.usage_limit_reached" -- cgit 1.5.1 From eabc5f827107a24717655e0eec376adf88b574c4 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 15 Aug 2018 15:04:52 +0100 Subject: wip cut at sending resource server notices --- .../resource_limits_server_notices.py | 69 +++++++++++----------- synapse/server_notices/server_notices_manager.py | 30 ++++++---- 2 files changed, 55 insertions(+), 44 deletions(-) (limited to 'synapse') diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 0470f66205..94d0f98189 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -40,6 +40,7 @@ class ResourceLimitsServerNotices(object): self._notified_of_blocking = set() self._resouce_limited = False + # Config checks? @defer.inlineCallbacks @@ -69,43 +70,43 @@ class ResourceLimitsServerNotices(object): self._resouce_limited = False # Need to start removing notices if user_id in self._notified_of_blocking: - # Send message to remove warning - needs updating + # Send message to remove warning + # send state event here + # How do I do this? if drop the id, how to refer to it? 
content = { - 'body': '', - 'admin_email': '', + "pinned":[] } - self._send_server_notice(user_id, content) + yield self._server_notices_manager.send_notice( + user_id, content, EventTypes.Pinned, '', + ) + self._notified_of_blocking.remove(user_id) except AuthError as e: # Need to start notifying of blocking - - self._resouce_limited = True - if user_id not in self._notified_of_blocking: - # TODO use admin email contained in error once PR lands - content = { - 'body': e.msg, - 'admin_email': 'stunt@adminemail.com', - 'msgtype': 'm.text' - } - self._send_server_notice(user_id, content) - self._notified_of_blocking.add(user_id) - - - @defer.inlineCallbacks - def _send_server_notice(self, user_id, content): - """Sends Server notice - - Args: - user_id(str): The user to send to - content(str): The content of the message - - Returns: - Deferred[] - """ - try: - yield self._server_notices_manager.send_notice( - user_id, content - ) - except SynapseError as e: - logger.error("Error sending server notice about resource limits: %s", e) + try: + self._resouce_limited = True + if user_id not in self._notified_of_blocking: + # TODO use admin email contained in error once PR lands + content = { + 'body': e.msg, + 'admin_email': 'stunt@adminemail.com', + } + event = yield self._server_notices_manager.send_notice( + user_id, content, EventTypes.ServerNoticeLimitReached + ) + + # send server notices state event here + # TODO Over writing pinned events + content = { + "pinned":[ + event.event_id, + ] + } + yield self._server_notices_manager.send_notice( + user_id, content, EventTypes.Pinned, '', + ) + + self._notified_of_blocking.add(user_id) + except SynapseError as e: + logger.error("Error sending server notice about resource limits: %s", e) diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index a26deace53..3e57f8211a 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -46,7 +46,10 @@ class ServerNoticesManager(object): return self._config.server_notices_mxid is not None @defer.inlineCallbacks - def send_notice(self, user_id, event_content): + def send_notice( + self, user_id, event_content, + type=EventTypes.Message, state_key=None + ): """Send a notice to the given user Creates the server notices room, if none exists. @@ -54,9 +57,11 @@ class ServerNoticesManager(object): Args: user_id (str): mxid of user to send event to. 
event_content (dict): content of event to send + type(EventTypes): type of event + is_state_event(bool): Is the event a state event Returns: - Deferred[None] + Deferred[FrozenEvent] """ room_id = yield self.get_notice_room_for_user(user_id) @@ -65,15 +70,19 @@ class ServerNoticesManager(object): logger.info("Sending server notice to %s", user_id) - yield self._event_creation_handler.create_and_send_nonmember_event( - requester, { - "type": EventTypes.Message, - "room_id": room_id, - "sender": system_mxid, - "content": event_content, - }, - ratelimit=False, + event_dict = { + "type": type, + "room_id": room_id, + "sender": system_mxid, + "content": event_content, + } + if state_key: + event_dict['state_key'] = state_key + + res = yield self._event_creation_handler.create_and_send_nonmember_event( + requester, event_dict, ratelimit=False, ) + defer.returnValue(res) @cachedInlineCallbacks() def get_notice_room_for_user(self, user_id): @@ -141,6 +150,7 @@ class ServerNoticesManager(object): creator_join_profile=join_profile, ) room_id = info['room_id'] + yield self._store.add_tag_to_room(user_id, room_id, 'm.server_notice', None) logger.info("Created server notices room %s for %s", room_id, user_id) defer.returnValue(room_id) -- cgit 1.5.1 From c055c91655cbe5d4e6b36d86a00dafb02c585b6d Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 16 Aug 2018 11:10:19 +0100 Subject: fix case where empty string state check is evaulated as False --- synapse/server_notices/server_notices_manager.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index 3e57f8211a..22c819cc5b 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -76,7 +76,8 @@ class ServerNoticesManager(object): "sender": system_mxid, "content": event_content, } - if state_key: + + if state_key is not None: event_dict['state_key'] = state_key res = yield self._event_creation_handler.create_and_send_nonmember_event( -- cgit 1.5.1 From df1e4f259fda3009dacbb3211e999f7d89adaf87 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 16 Aug 2018 11:10:53 +0100 Subject: WIP impl commiting to get feedback --- .../resource_limits_server_notices.py | 80 ++++++++++++++++++++-- 1 file changed, 74 insertions(+), 6 deletions(-) (limited to 'synapse') diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 94d0f98189..2b714a1016 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -40,7 +40,8 @@ class ResourceLimitsServerNotices(object): self._notified_of_blocking = set() self._resouce_limited = False - + self._message_handler = hs.get_message_handler() + self._state = hs.get_state_handler() # Config checks? 
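The empty-string fix above is worth isolating: server-notice state events, such as m.room.pinned_events, use state_key == "", which is falsy, so `if state_key:` silently produced a non-state event. A minimal sketch of the corrected construction:

```python
def build_notice_event(event_type, room_id, sender, content, state_key=None):
    event_dict = {
        "type": event_type,
        "room_id": room_id,
        "sender": sender,
        "content": content,
    }
    if state_key is not None:  # `if state_key:` would drop state_key == ""
        event_dict["state_key"] = state_key
    return event_dict

# the empty state_key must survive, or the event is not a state event
assert "state_key" in build_notice_event(
    "m.room.pinned_events", "!r:hs", "@notices:hs", {"pinned": []}, state_key=""
)
```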
@defer.inlineCallbacks @@ -57,6 +58,72 @@ class ResourceLimitsServerNotices(object): return if self._limit_usage_by_mau is True: + room_id = yield self._server_notices_manager.get_notice_room_for_user(user_id) + + + # Alternate impl - currently inlcuded because I'm not sure I am on + # the right track and want to share WIP + + # logger.info("GET STATE EVENTS") + # currently_blocked = False + # events = [] + # try: + # events = yield self._message_handler.get_state_events(user_id, room_id, types=[(EventTypes.Pinned, None)]) + # except AuthError as e: + # # The user has yet to join the server notices room + # pass + # + # pinned_event_refs = [] + # for e in events: + # logger.info('events %s' % e) + # logger.info(type(e)) + # for key, event_ids in e['content'].items(): + # logger.info('Key Event %s %s' % (key, event_ids)) + # if key == 'pinned': + # pinned_event_refs = event_ids + # + # logger.info('pinned_event_refs %s' % pinned_event_refs) + # + # events = yield self._store.get_events(pinned_event_refs) + # logger.info(events) + # for event_id, event in events.items(): + # logger.info("event_id, event event.type %s %s %s" % (event_id, event, event.type)) + # if event.type == 'm.server_notice.usage_limit_reached': + # currently_blocked = True + # + # logger.info('Currently Blocked is %r' % currently_blocked) + + #for e in events: + # logger.info(e) + currently_blocked = False + logger.info("GET CURRENT STATE") + pinned_state_event = yield self._state.get_current_state(room_id, event_type=EventTypes.Pinned) + logger.info(events) + logger.info(events.get('content')) + + referenced_events = [] + if pinned_state_event is not None: + content = pinned_state_event.get('content') + if content is not None: + referenced_events = content.get('pinned') + + events = yield self._store.get_events(referenced_events) + logger.info(events) + for event_id, event in events.items(): + logger.info("event_id, event event.type %s %s %s" % (event_id, event, event.type)) + if event.type == 'm.server_notice.usage_limit_reached': + currently_blocked = True + + logger.info("currently_blocked is %r" % currently_blocked) + + #event = yield self._store.get_event(events.event_id) + #logger.info(event) + + #logger.info("GET CURRENT STATE IDs") + #events = yield self._state.get_current_state_ids(room_id) + #for k,v in events.items(): + # logger.info('%s %s' % (k,v)) + timestamp = yield self._store.user_last_seen_monthly_active(user_id) if timestamp is None: # This user will be blocked from receiving the notice anyway. @@ -69,7 +136,8 @@ class ResourceLimitsServerNotices(object): yield self.auth.check_auth_blocking() self._resouce_limited = False # Need to start removing notices - if user_id in self._notified_of_blocking: + # if user_id in self._notified_of_blocking: + if currently_blocked: # Send message to remove warning # send state event here # How do I do this? if drop the id, how to refer to it? 
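
Distilled from the WIP hunk above, a sketch of the check it is groping towards (names are assumed for illustration, not part of the patch): the notices room counts as blocked if any event referenced by its m.room.pinned_events state is a limit-reached notice.

LIMIT_REACHED_TYPE = "m.server_notice.usage_limit_reached"

def is_currently_blocked(pinned_content, events_by_id):
    # pinned_content: content of the m.room.pinned_events state event, or None
    # events_by_id: mapping of event_id -> event, as returned by get_events()
    refs = pinned_content.get("pinned", []) if pinned_content else []
    return any(
        events_by_id[event_id].type == LIMIT_REACHED_TYPE
        for event_id in refs
        if event_id in events_by_id
    )
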
@@ -79,14 +147,14 @@ class ResourceLimitsServerNotices(object): yield self._server_notices_manager.send_notice( user_id, content, EventTypes.Pinned, '', ) - - self._notified_of_blocking.remove(user_id) + logger.info('deactivate block') except AuthError as e: # Need to start notifying of blocking try: self._resouce_limited = True - if user_id not in self._notified_of_blocking: + #if user_id not in self._notified_of_blocking: + if not currently_blocked: # TODO use admin email contained in error once PR lands content = { 'body': e.msg, @@ -103,10 +171,10 @@ class ResourceLimitsServerNotices(object): event.event_id, ] } + logger.info("active block") yield self._server_notices_manager.send_notice( user_id, content, EventTypes.Pinned, '', ) - self._notified_of_blocking.add(user_id) except SynapseError as e: logger.error("Error sending server notice about resource limits: %s", e) -- cgit 1.5.1 From a675f9c5560a16655a9337992499690c9f27a7eb Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 16 Aug 2018 14:53:35 +0100 Subject: check for room state before deciding on action --- synapse/server_notices/consent_server_notices.py | 1 - .../resource_limits_server_notices.py | 92 ++++++---------------- .../test_resource_limits_server_notices.py | 52 +++++++----- 3 files changed, 58 insertions(+), 87 deletions(-) (limited to 'synapse') diff --git a/synapse/server_notices/consent_server_notices.py b/synapse/server_notices/consent_server_notices.py index 783ceae1ad..a9fe58d9ff 100644 --- a/synapse/server_notices/consent_server_notices.py +++ b/synapse/server_notices/consent_server_notices.py @@ -22,7 +22,6 @@ from synapse.api.errors import SynapseError from synapse.api.urls import ConsentURIBuilder from synapse.config import ConfigError from synapse.types import get_localpart_from_id -from synapse.api.constants import EventTypes logger = logging.getLogger(__name__) diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 2b714a1016..43a926fc79 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -16,8 +16,8 @@ import logging from twisted.internet import defer -from synapse.api.errors import AuthError, SynapseError from synapse.api.constants import EventTypes +from synapse.api.errors import AuthError, SynapseError logger = logging.getLogger(__name__) @@ -35,10 +35,10 @@ class ResourceLimitsServerNotices(object): self._store = hs.get_datastore() self.auth = hs.get_auth() self._server_notice_content = hs.config.user_consent_server_notice_content + self._admin_uri = hs.config.admin_uri self._limit_usage_by_mau = hs.config.limit_usage_by_mau self._hs_disabled = hs.config.hs_disabled - self._notified_of_blocking = set() self._resouce_limited = False self._message_handler = hs.get_message_handler() self._state = hs.get_state_handler() @@ -58,83 +58,45 @@ class ResourceLimitsServerNotices(object): return if self._limit_usage_by_mau is True: - room_id = yield self._server_notices_manager.get_notice_room_for_user(user_id) + timestamp = yield self._store.user_last_seen_monthly_active(user_id) + if timestamp is None: + # This user will be blocked from receiving the notice anyway. 
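
The commit above replaces the process-local _notified_of_blocking set with room state, because a set is emptied on restart and is not shared between workers, whereas the pinned state is persisted. The resulting logic reduces to a small decision table; a sketch under that reading:

def decide_action(is_auth_blocking, currently_blocked):
    # is_auth_blocking: does check_auth_blocking() currently raise?
    # currently_blocked: does room state say the server is blocked?
    if currently_blocked and not is_auth_blocking:
        return "remove"  # re-send m.room.pinned_events without the notice
    if not currently_blocked and is_auth_blocking:
        return "add"     # send the limit notice, then pin it
    return "noop"        # room state already matches reality
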
+ # In practice, not sure we can ever get here + return + room_id = yield self._server_notices_manager.get_notice_room_for_user(user_id) - # Alternate impl - currently inlcuded because I'm not sure I am on - # the right track and want to share WIP - - # logger.info("GET STATE EVENTS") - # currently_blocked = False - # events = [] - # try: - # events = yield self._message_handler.get_state_events(user_id, room_id, types=[(EventTypes.Pinned, None)]) - # except AuthError as e: - # # The user has yet to join the server notices room - # pass - # - # pinned_event_refs = [] - # for e in events: - # logger.info('events %s' % e) - # logger.info(type(e)) - # for key, event_ids in e['content'].items(): - # logger.info('Key Event %s %s' % (key, event_ids)) - # if key == 'pinned': - # pinned_event_refs = event_ids - # - # logger.info('pinned_event_refs %s' % pinned_event_refs) - # - # events = yield self._store.get_events(pinned_event_refs) - # logger.info(events) - # for event_id, event in events.items(): - # logger.info("event_id, event event.type %s %s %s" % (event_id, event, event.type)) - # if event.type == 'm.server_notice.usage_limit_reached': - # currently_blocked = True - # - # logger.info('Currently Blocked is %r' % currently_blocked) - - #for e in events: - # logger.info(e) currently_blocked = False logger.info("GET CURRENT STATE") - pinned_state_event = yield self._state.get_current_state(room_id, event_type=EventTypes.Pinned) - logger.info(events) - logger.info(events.get('content')) + pinned_state_event = None + try: + pinned_state_event = yield self._state.get_current_state( + room_id, event_type=EventTypes.Pinned + ) + except AuthError as e: + # The user has yet to join the server notices room + pass referenced_events = [] if pinned_state_event is not None: - content = pinned_state_event.get('content') - if content is not None: - referenced_events = content.get('pinned') + referenced_events = pinned_state_event.content.get('pinned') events = yield self._store.get_events(referenced_events) logger.info(events) for event_id, event in events.items(): - logger.info("event_id, event event.type %s %s %s" % (event_id, event, event.type)) - if event.type == 'm.server_notice.usage_limit_reached': + logger.info("event_id, event event.type %s %s %s" % ( + event_id, event, event.type) + ) + if event.type == EventTypes.ServerNoticeLimitReached: currently_blocked = True logger.info("currently_blocked is %r" % currently_blocked) - - #event = yield self._store.get_event(events.event_id) - #logger.info(event) - - #logger.info("GET CURRENT STATE IDs") - #events = yield self._state.get_current_state_ids(room_id) - #for k,v in events.items(): - # logger.info('%s %s' % (k,v)) - - timestamp = yield self._store.user_last_seen_monthly_active(user_id) - if timestamp is None: - # This user will be blocked from receiving the notice anyway. - # In practice, not sure we can ever get here - return try: # Normally should always pass in user_id if you have it, but in # this case are checking what would happen to other users if they # were to arrive. yield self.auth.check_auth_blocking() - self._resouce_limited = False + # Need to start removing notices # if user_id in self._notified_of_blocking: if currently_blocked: @@ -142,7 +104,7 @@ class ResourceLimitsServerNotices(object): # send state event here # How do I do this? if drop the id, how to refer to it? 
content = { - "pinned":[] + "pinned": [] } yield self._server_notices_manager.send_notice( user_id, content, EventTypes.Pinned, '', @@ -152,13 +114,11 @@ class ResourceLimitsServerNotices(object): except AuthError as e: # Need to start notifying of blocking try: - self._resouce_limited = True - #if user_id not in self._notified_of_blocking: if not currently_blocked: # TODO use admin email contained in error once PR lands content = { 'body': e.msg, - 'admin_email': 'stunt@adminemail.com', + 'admin_uri': self._admin_uri, } event = yield self._server_notices_manager.send_notice( user_id, content, EventTypes.ServerNoticeLimitReached @@ -167,7 +127,7 @@ class ResourceLimitsServerNotices(object): # send server notices state event here # TODO Over writing pinned events content = { - "pinned":[ + "pinned": [ event.event_id, ] } @@ -177,4 +137,4 @@ class ResourceLimitsServerNotices(object): ) except SynapseError as e: - logger.error("Error sending server notice about resource limits: %s", e) + logger.error("Error sending resource limits server notice: %s", e) diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index a69253f1b6..fb8c593a75 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -2,6 +2,7 @@ from mock import Mock from twisted.internet import defer +from synapse.api.constants import EventTypes from synapse.api.errors import AuthError from synapse.handlers.auth import AuthHandler from synapse.server_notices.resource_limits_server_notices import ( @@ -20,7 +21,7 @@ class AuthHandlers(object): class TestResourceLimitsServerNotices(unittest.TestCase): @defer.inlineCallbacks def setUp(self): - self.hs = yield setup_test_homeserver(handlers=None) + self.hs = yield setup_test_homeserver(self.addCleanup, handlers=None) self.hs.handlers = AuthHandlers(self.hs) self.auth_handler = self.hs.handlers.auth_handler self.server_notices_sender = self.hs.get_server_notices_sender() @@ -37,10 +38,23 @@ class TestResourceLimitsServerNotices(unittest.TestCase): ) self._send_notice = self._rlsn._server_notices_manager.send_notice self._rlsn._server_notices_manager.send_notice = Mock() + self._rlsn._state.get_current_state = Mock(return_value=defer.succeed(None)) + self._rlsn._store.get_events = Mock(return_value=defer.succeed({})) + self._send_notice = self._rlsn._server_notices_manager.send_notice self._rlsn._limit_usage_by_mau = True - self.user_id = "user_id" + self.user_id = "@user_id:test" + + self.server_notices_mxid = "@server:test" + self.server_notices_mxid_display_name = None + self.server_notices_mxid_avatar_url = None + self.server_notices_room_name = "Server Notices" + + self._rlsn._server_notices_manager.get_notice_room_for_user = Mock( + returnValue="" + ) + self.hs.config.admin_uri = "mailto:user@test.com" @defer.inlineCallbacks def test_maybe_send_server_notice_to_user_flag_off(self): @@ -48,13 +62,13 @@ class TestResourceLimitsServerNotices(unittest.TestCase): # test hs disabled case self._hs_disabled = True - yield self._rlsn.maybe_send_server_notice_to_user("user_id") + yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) self._send_notice.assert_not_called() # Test when mau limiting disabled self._hs_disabled = False self._rlsn._limit_usage_by_mau = False - yield self._rlsn.maybe_send_server_notice_to_user("user_id") + yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) 
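
The tests above lean on one pattern throughout: stubbing inlineCallbacks-style storage calls with pre-resolved Deferreds, so the generator runs to completion synchronously. In isolation, using the same mock and twisted imports as the test module:

from mock import Mock
from twisted.internet import defer

store = Mock()
# get_events() normally hits the database; returning defer.succeed(...)
# lets `yield store.get_events(...)` complete immediately in a test.
store.get_events = Mock(return_value=defer.succeed({}))
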
self._send_notice.assert_not_called() @@ -62,40 +76,40 @@ class TestResourceLimitsServerNotices(unittest.TestCase): def test_maybe_send_server_notice_to_user_remove_blocked_notice(self): """Test when user has blocked notice, but should have it removed""" - self._rlsn._notified_of_blocking.add(self.user_id) self._rlsn.auth.check_auth_blocking = Mock() + mock_event = Mock(type=EventTypes.ServerNoticeLimitReached) + self._rlsn._store.get_events = Mock(return_value=defer.succeed( + {"123": mock_event} + )) yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) - # "remove warning" obviously aweful, but test will start failing when code - # actually sends a real event, and then it can be updated - - self._send_notice.assert_called_once_with(self.user_id, "remove warning") - self.assertFalse(self.user_id in self._rlsn._notified_of_blocking) + # Would be better to check the content, but once == remove blocking event + self._send_notice.assert_called_once() @defer.inlineCallbacks def test_maybe_send_server_notice_to_user_remove_blocked_notice_noop(self): """Test when user has blocked notice, but notice ought to be there (NOOP)""" - self._rlsn._notified_of_blocking.add(self.user_id) self._rlsn.auth.check_auth_blocking = Mock( side_effect=AuthError(403, 'foo') ) - yield self._rlsn.maybe_send_server_notice_to_user("user_id") + mock_event = Mock(type=EventTypes.ServerNoticeLimitReached) + self._rlsn._store.get_events = Mock(return_value=defer.succeed( + {"123": mock_event} + )) + yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) self._send_notice.assert_not_called() - self.assertTrue(self.user_id in self._rlsn._notified_of_blocking) @defer.inlineCallbacks def test_maybe_send_server_notice_to_user_add_blocked_notice(self): """Test when user does not have blocked notice, but should have one""" self._rlsn.auth.check_auth_blocking = Mock(side_effect=AuthError(403, 'foo')) - yield self._rlsn.maybe_send_server_notice_to_user("user_id") + yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) - # "add warning" obviously awful, but test will start failing when code - # actually sends a real event, and then it can be updated - self._send_notice.assert_called_once_with(self.user_id, "add warning") - self.assertTrue(self.user_id in self._rlsn._notified_of_blocking) + # Would be better to check contents, but 2 calls == set blocking event + self.assertTrue(self._send_notice.call_count == 2) @defer.inlineCallbacks def test_maybe_send_server_notice_to_user_add_blocked_notice_noop(self): @@ -106,7 +120,6 @@ class TestResourceLimitsServerNotices(unittest.TestCase): yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) self._send_notice.assert_not_called() - self.assertFalse(self.user_id in self._rlsn._notified_of_blocking) @defer.inlineCallbacks def test_maybe_send_server_notice_to_user_not_in_mau_cohort(self): @@ -122,4 +135,3 @@ class TestResourceLimitsServerNotices(unittest.TestCase): yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) self._send_notice.assert_not_called() - self.assertFalse(self.user_id in self._rlsn._notified_of_blocking) -- cgit 1.5.1 From eff3ae3b9a8ab558a106ce3fe69774a2e594cf72 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 16 Aug 2018 15:48:34 +0100 Subject: add room tagging --- synapse/server_notices/resource_limits_server_notices.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'synapse') diff --git a/synapse/server_notices/resource_limits_server_notices.py 
b/synapse/server_notices/resource_limits_server_notices.py index 43a926fc79..570e8307cc 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -66,8 +66,12 @@ class ResourceLimitsServerNotices(object): room_id = yield self._server_notices_manager.get_notice_room_for_user(user_id) + # Need to set tag here because room may have been created prior to + # tagging being set on creation. Ideally would set something to read + # room tags first, and cache that aggressively/ + yield self._store.add_tag_to_room(user_id, room_id, 'm.server_notice', None) + currently_blocked = False - logger.info("GET CURRENT STATE") pinned_state_event = None try: pinned_state_event = yield self._state.get_current_state( @@ -82,15 +86,9 @@ class ResourceLimitsServerNotices(object): referenced_events = pinned_state_event.content.get('pinned') events = yield self._store.get_events(referenced_events) - logger.info(events) for event_id, event in events.items(): - logger.info("event_id, event event.type %s %s %s" % ( - event_id, event, event.type) - ) if event.type == EventTypes.ServerNoticeLimitReached: currently_blocked = True - - logger.info("currently_blocked is %r" % currently_blocked) try: # Normally should always pass in user_id if you have it, but in # this case are checking what would happen to other users if they @@ -109,7 +107,6 @@ class ResourceLimitsServerNotices(object): yield self._server_notices_manager.send_notice( user_id, content, EventTypes.Pinned, '', ) - logger.info('deactivate block') except AuthError as e: # Need to start notifying of blocking @@ -131,7 +128,6 @@ class ResourceLimitsServerNotices(object): event.event_id, ] } - logger.info("active block") yield self._server_notices_manager.send_notice( user_id, content, EventTypes.Pinned, '', ) -- cgit 1.5.1 From 3c1080b6e4a4d7c80d67bd9bee45d845aa145088 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 16 Aug 2018 17:02:04 +0100 Subject: refactor for readability, and reuse caching for setting tags --- .../resource_limits_server_notices.py | 176 +++++++++++++-------- synapse/server_notices/server_notices_manager.py | 6 +- .../test_resource_limits_server_notices.py | 1 + 3 files changed, 115 insertions(+), 68 deletions(-) (limited to 'synapse') diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 570e8307cc..61becca758 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -18,6 +18,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes from synapse.api.errors import AuthError, SynapseError +from synapse.server_notices.server_notices_manager import SERVER_NOTICE_ROOM_TAG logger = logging.getLogger(__name__) @@ -57,80 +58,121 @@ class ResourceLimitsServerNotices(object): if self._hs_disabled is True: return - if self._limit_usage_by_mau is True: - timestamp = yield self._store.user_last_seen_monthly_active(user_id) - if timestamp is None: - # This user will be blocked from receiving the notice anyway. - # In practice, not sure we can ever get here - return - - room_id = yield self._server_notices_manager.get_notice_room_for_user(user_id) + if self._limit_usage_by_mau is False: + return - # Need to set tag here because room may have been created prior to - # tagging being set on creation. 
Ideally would set something to read - # room tags first, and cache that aggressively/ - yield self._store.add_tag_to_room(user_id, room_id, 'm.server_notice', None) + timestamp = yield self._store.user_last_seen_monthly_active(user_id) + if timestamp is None: + # This user will be blocked from receiving the notice anyway. + # In practice, not sure we can ever get here + return - currently_blocked = False - pinned_state_event = None - try: - pinned_state_event = yield self._state.get_current_state( - room_id, event_type=EventTypes.Pinned + # Determine current state of room + + room_id = yield self._server_notices_manager.get_notice_room_for_user(user_id) + + yield self._check_and_set_tags(user_id, room_id) + currently_blocked, ref_events = yield self._is_room_currently_blocked(room_id) + + try: + # Normally should always pass in user_id if you have it, but in + # this case are checking what would happen to other users if they + # were to arrive. + yield self.auth.check_auth_blocking() + if currently_blocked: + # Room is notifying of a block, when it ought not to be. + # Remove block notification + content = { + "pinned": ref_events + } + yield self._server_notices_manager.send_notice( + user_id, content, EventTypes.Pinned, '', ) - except AuthError as e: - # The user has yet to join the server notices room - pass - - referenced_events = [] - if pinned_state_event is not None: - referenced_events = pinned_state_event.content.get('pinned') - - events = yield self._store.get_events(referenced_events) - for event_id, event in events.items(): - if event.type == EventTypes.ServerNoticeLimitReached: - currently_blocked = True + + except AuthError as e: + try: - # Normally should always pass in user_id if you have it, but in - # this case are checking what would happen to other users if they - # were to arrive. - yield self.auth.check_auth_blocking() - - # Need to start removing notices - # if user_id in self._notified_of_blocking: - if currently_blocked: - # Send message to remove warning - # send state event here - # How do I do this? if drop the id, how to refer to it? + if not currently_blocked: + # Room is not notifying of a block, when it ought to be. 
+ # Add block notification content = { - "pinned": [] + 'body': e.msg, + 'admin_uri': self._admin_uri, + } + event = yield self._server_notices_manager.send_notice( + user_id, content, EventTypes.ServerNoticeLimitReached + ) + + content = { + "pinned": [ + event.event_id, + ] } yield self._server_notices_manager.send_notice( user_id, content, EventTypes.Pinned, '', ) - except AuthError as e: - # Need to start notifying of blocking - try: - if not currently_blocked: - # TODO use admin email contained in error once PR lands - content = { - 'body': e.msg, - 'admin_uri': self._admin_uri, - } - event = yield self._server_notices_manager.send_notice( - user_id, content, EventTypes.ServerNoticeLimitReached - ) - - # send server notices state event here - # TODO Over writing pinned events - content = { - "pinned": [ - event.event_id, - ] - } - yield self._server_notices_manager.send_notice( - user_id, content, EventTypes.Pinned, '', - ) - - except SynapseError as e: - logger.error("Error sending resource limits server notice: %s", e) + except SynapseError as e: + logger.error("Error sending resource limits server notice: %s", e) + + @defer.inlineCallbacks + def _check_and_set_tags(self, user_id, room_id): + """ + Since server notices rooms were originally not with tags, + important to check that tags have been set correctly + Args: + user_id(str): the user in question + room_id(str): the server notices room for that user + """ + tags = yield self._store.get_tags_for_user(user_id) + server_notices_tags = tags.get(room_id) + need_to_set_tag = True + if server_notices_tags: + if server_notice_tags.get(SERVER_NOTICE_ROOM_TAG): + # tag already present, nothing to do here + need_to_set_tag = False + if need_to_set_tag: + yield self._store.add_tag_to_room( + user_id, room_id, SERVER_NOTICE_ROOM_TAG, None + ) + + @defer.inlineCallbacks + def _is_room_currently_blocked(self, room_id): + """ + Determines if the room is currently blocked + + Args: + room_id(str): The room id of the server notices room + + Returns: + + bool: Is the room currently blocked + list: The list of pinned events that are unrelated to limit blocking + This list can be used as a convenience in the case where the block + is to be lifted and the remaining pinned event references need to be + preserved + """ + currently_blocked = False + pinned_state_event = None + try: + pinned_state_event = yield self._state.get_current_state( + room_id, event_type=EventTypes.Pinned + ) + except AuthError as e: + # The user has yet to join the server notices room + pass + + referenced_events = [] + if pinned_state_event is not None: + referenced_events = pinned_state_event.content.get('pinned') + + events = yield self._store.get_events(referenced_events) + event_to_remove = None + for event_id, event in events.items(): + if event.type == EventTypes.ServerNoticeLimitReached: + currently_blocked = True + # remove event in case we need to disable blocking later on. 
+ if event_id in referenced_events: + referenced_events.remove(event.event_id) + + defer.returnValue((currently_blocked, referenced_events)) diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index 22c819cc5b..5968104a99 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -22,6 +22,8 @@ from synapse.util.caches.descriptors import cachedInlineCallbacks logger = logging.getLogger(__name__) +SERVER_NOTICE_ROOM_TAG = "m.server_notice" + class ServerNoticesManager(object): def __init__(self, hs): @@ -151,7 +153,9 @@ class ServerNoticesManager(object): creator_join_profile=join_profile, ) room_id = info['room_id'] - yield self._store.add_tag_to_room(user_id, room_id, 'm.server_notice', None) + yield self._store.add_tag_to_room( + user_id, room_id, SERVER_NOTICE_ROOM_TAG, None + ) logger.info("Created server notices room %s for %s", room_id, user_id) defer.returnValue(room_id) diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index fb8c593a75..ccb69097b1 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -54,6 +54,7 @@ class TestResourceLimitsServerNotices(unittest.TestCase): self._rlsn._server_notices_manager.get_notice_room_for_user = Mock( returnValue="" ) + self._rlsn._store.add_tag_to_room = Mock() self.hs.config.admin_uri = "mailto:user@test.com" @defer.inlineCallbacks -- cgit 1.5.1 From 51b17ec56615b710f4937e3c20208841a6591477 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 16 Aug 2018 17:32:22 +0100 Subject: flake8 --- synapse/server_notices/resource_limits_server_notices.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 61becca758..33625fba72 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -128,7 +128,7 @@ class ResourceLimitsServerNotices(object): server_notices_tags = tags.get(room_id) need_to_set_tag = True if server_notices_tags: - if server_notice_tags.get(SERVER_NOTICE_ROOM_TAG): + if server_notices_tags.get(SERVER_NOTICE_ROOM_TAG): # tag already present, nothing to do here need_to_set_tag = False if need_to_set_tag: @@ -158,7 +158,7 @@ class ResourceLimitsServerNotices(object): pinned_state_event = yield self._state.get_current_state( room_id, event_type=EventTypes.Pinned ) - except AuthError as e: + except AuthError: # The user has yet to join the server notices room pass @@ -167,7 +167,6 @@ class ResourceLimitsServerNotices(object): referenced_events = pinned_state_event.content.get('pinned') events = yield self._store.get_events(referenced_events) - event_to_remove = None for event_id, event in events.items(): if event.type == EventTypes.ServerNoticeLimitReached: currently_blocked = True -- cgit 1.5.1 From 38f708a2bbee13ebd34f1595910dd0e4fd532818 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 17 Aug 2018 11:37:42 +0100 Subject: Remote profile cache should remain in master worker --- synapse/storage/profile.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py index 246ab836bd..88b50f33b5 100644 --- 
a/synapse/storage/profile.py +++ b/synapse/storage/profile.py @@ -94,6 +94,8 @@ class ProfileWorkerStore(SQLBaseStore): desc="set_profile_avatar_url", ) + +class ProfileStore(ProfileWorkerStore): def add_remote_profile_cache(self, user_id, displayname, avatar_url): """Ensure we are caching the remote user's profiles. @@ -180,7 +182,3 @@ class ProfileWorkerStore(SQLBaseStore): if res: defer.returnValue(True) - - -class ProfileStore(ProfileWorkerStore): - pass -- cgit 1.5.1 From ca87ad1defac1082462367854cb4a656b7a96e90 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 17 Aug 2018 11:43:16 +0100 Subject: Split ProfileHandler into master and worker --- synapse/handlers/profile.py | 21 ++++++++++++++------- synapse/server.py | 7 +++++-- tests/handlers/test_profile.py | 4 ++-- 3 files changed, 21 insertions(+), 11 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 6d1fbb1a5c..8b349f6ad6 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -33,12 +33,12 @@ from ._base import BaseHandler logger = logging.getLogger(__name__) -class ProfileHandler(BaseHandler): +class WorkerProfileHandler(BaseHandler): PROFILE_UPDATE_MS = 60 * 1000 PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000 def __init__(self, hs): - super(ProfileHandler, self).__init__(hs) + super(WorkerProfileHandler, self).__init__(hs) self.federation = hs.get_federation_client() hs.get_federation_registry().register_query_handler( @@ -47,11 +47,6 @@ class ProfileHandler(BaseHandler): self.user_directory_handler = hs.get_user_directory_handler() - if hs.config.worker_app is None: - self.clock.looping_call( - self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS, - ) - self._notify_master_profile_change = ( ReplicationHandleProfileChangeRestServlet.make_client(hs) ) @@ -298,6 +293,18 @@ class ProfileHandler(BaseHandler): room_id, str(e.message) ) + +class MasterProfileHandler(WorkerProfileHandler): + PROFILE_UPDATE_MS = 60 * 1000 + PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000 + + def __init__(self, hs): + super(MasterProfileHandler, self).__init__(hs) + + self.clock.looping_call( + self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS, + ) + def _start_update_remote_profile_cache(self): return run_as_background_process( "Update remote profile", self._update_remote_profile_cache, diff --git a/synapse/server.py b/synapse/server.py index 140be9ebe8..be85aad8cf 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -55,7 +55,7 @@ from synapse.handlers.initial_sync import InitialSyncHandler from synapse.handlers.message import EventCreationHandler, MessageHandler from synapse.handlers.pagination import PaginationHandler from synapse.handlers.presence import PresenceHandler -from synapse.handlers.profile import ProfileHandler +from synapse.handlers.profile import MasterProfileHandler, WorkerProfileHandler from synapse.handlers.read_marker import ReadMarkerHandler from synapse.handlers.receipts import ReceiptsHandler from synapse.handlers.room import RoomContextHandler, RoomCreationHandler @@ -307,7 +307,10 @@ class HomeServer(object): return InitialSyncHandler(self) def build_profile_handler(self): - return ProfileHandler(self) + if self.config.worker_app: + return WorkerProfileHandler(self) + else: + return MasterProfileHandler(self) def build_event_creation_handler(self): return EventCreationHandler(self) diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index dc17918a3d..07cf5f4c8e 100644 --- 
a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -20,7 +20,7 @@
from twisted.internet import defer
import synapse.types
from synapse.api.errors import AuthError
-from synapse.handlers.profile import ProfileHandler
+from synapse.handlers.profile import MasterProfileHandler
from synapse.types import UserID
from tests import unittest
@@ -29,7 +29,7 @@ from tests.utils import setup_test_homeserver
class ProfileHandlers(object):
def __init__(self, hs):
- self.profile_handler = ProfileHandler(hs)
+ self.profile_handler = MasterProfileHandler(hs)
class ProfileTestCase(unittest.TestCase):
-- cgit 1.5.1
From d49b77404beab498ad8c16bd55e811b423851240 Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Fri, 17 Aug 2018 15:21:34 +0100
Subject: clean up, no functional changes
---
synapse/server_notices/consent_server_notices.py | 2 +-
.../resource_limits_server_notices.py | 86 ++++++++++++----------
.../test_resource_limits_server_notices.py | 32 ++++----
3 files changed, 64 insertions(+), 56 deletions(-)
(limited to 'synapse')
diff --git a/synapse/server_notices/consent_server_notices.py b/synapse/server_notices/consent_server_notices.py
index a9fe58d9ff..5e3044d164 100644
--- a/synapse/server_notices/consent_server_notices.py
+++ b/synapse/server_notices/consent_server_notices.py
@@ -103,7 +103,7 @@ class ConsentServerNotices(object):
},
)
yield self._server_notices_manager.send_notice(
- user_id, content
+ user_id, content,
)
yield self._store.user_set_consent_server_notice_sent(
user_id, self._current_consent_version,
diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py
index 33625fba72..5987e448b2 100644
--- a/synapse/server_notices/resource_limits_server_notices.py
+++ b/synapse/server_notices/resource_limits_server_notices.py
@@ -14,40 +14,41 @@
# limitations under the License.
import logging
+from six import iteritems
+
from twisted.internet import defer
from synapse.api.constants import EventTypes
-from synapse.api.errors import AuthError, SynapseError
+from synapse.api.errors import AuthError, ResourceLimitError, SynapseError
from synapse.server_notices.server_notices_manager import SERVER_NOTICE_ROOM_TAG
logger = logging.getLogger(__name__)
class ResourceLimitsServerNotices(object):
- """
+ """ Keeps track of whether the server has reached its resource limit and
+ ensures that the client is kept up to date.
"""
def __init__(self, hs):
"""
-
Args:
hs (synapse.server.HomeServer):
"""
self._server_notices_manager = hs.get_server_notices_manager()
self._store = hs.get_datastore()
- self.auth = hs.get_auth()
- self._server_notice_content = hs.config.user_consent_server_notice_content
- self._admin_uri = hs.config.admin_uri
- self._limit_usage_by_mau = hs.config.limit_usage_by_mau
- self._hs_disabled = hs.config.hs_disabled
-
+ self._auth = hs.get_auth()
+ self._config = hs.config
self._resouce_limited = False
self._message_handler = hs.get_message_handler()
self._state = hs.get_state_handler()
- # Config checks?
@defer.inlineCallbacks
def maybe_send_server_notice_to_user(self, user_id):
- """Check if we need to send a notice to this user, and does so if so
+ """Check if we need to send a notice to this user; this will be true in
+ two cases.
+ 1. The server has reached its limit and the room state does not reflect this
+ 2. 
The room state indicates that the server has reached its limit when + actually the server is fine Args: user_id (str): user to check @@ -55,10 +56,10 @@ class ResourceLimitsServerNotices(object): Returns: Deferred """ - if self._hs_disabled is True: + if self._config.hs_disabled is True: return - if self._limit_usage_by_mau is False: + if self._config.limit_usage_by_mau is False: return timestamp = yield self._store.user_last_seen_monthly_active(user_id) @@ -78,8 +79,15 @@ class ResourceLimitsServerNotices(object): # Normally should always pass in user_id if you have it, but in # this case are checking what would happen to other users if they # were to arrive. - yield self.auth.check_auth_blocking() - if currently_blocked: + try: + yield self._auth.check_auth_blocking() + is_auth_blocking = False + except ResourceLimitError as e: + is_auth_blocking = True + event_content = e.msg + event_limit_type = e.limit_type + + if currently_blocked and not is_auth_blocking: # Room is notifying of a block, when it ought not to be. # Remove block notification content = { @@ -89,31 +97,29 @@ class ResourceLimitsServerNotices(object): user_id, content, EventTypes.Pinned, '', ) - except AuthError as e: + elif not currently_blocked and is_auth_blocking: + # Room is not notifying of a block, when it ought to be. + # Add block notification + content = { + 'body': event_content, + 'admin_uri': self._config.admin_uri, + 'limit_type': event_limit_type + } + event = yield self._server_notices_manager.send_notice( + user_id, content, EventTypes.ServerNoticeLimitReached + ) - try: - if not currently_blocked: - # Room is not notifying of a block, when it ought to be. - # Add block notification - content = { - 'body': e.msg, - 'admin_uri': self._admin_uri, - } - event = yield self._server_notices_manager.send_notice( - user_id, content, EventTypes.ServerNoticeLimitReached - ) - - content = { - "pinned": [ - event.event_id, - ] - } - yield self._server_notices_manager.send_notice( - user_id, content, EventTypes.Pinned, '', - ) - - except SynapseError as e: - logger.error("Error sending resource limits server notice: %s", e) + content = { + "pinned": [ + event.event_id, + ] + } + yield self._server_notices_manager.send_notice( + user_id, content, EventTypes.Pinned, '', + ) + + except SynapseError as e: + logger.error("Error sending resource limits server notice: %s", e) @defer.inlineCallbacks def _check_and_set_tags(self, user_id, room_id): @@ -167,7 +173,7 @@ class ResourceLimitsServerNotices(object): referenced_events = pinned_state_event.content.get('pinned') events = yield self._store.get_events(referenced_events) - for event_id, event in events.items(): + for event_id, event in events.iteritems(): if event.type == EventTypes.ServerNoticeLimitReached: currently_blocked = True # remove event in case we need to disable blocking later on. 
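
One detail of the cleanup above is worth flagging: it imports iteritems from six but then calls events.iteritems(), which exists only on Python 2; a later patch in this series corrects the call site. The portable form, for reference:

from six import iteritems

events = {"$event_id": None}
for event_id, event in iteritems(events):  # works on Python 2 and 3
    pass
# events.iteritems() would raise AttributeError on Python 3
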
diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index ccb69097b1..cc8d2f539c 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -3,7 +3,7 @@ from mock import Mock from twisted.internet import defer from synapse.api.constants import EventTypes -from synapse.api.errors import AuthError +from synapse.api.errors import ResourceLimitError from synapse.handlers.auth import AuthHandler from synapse.server_notices.resource_limits_server_notices import ( ResourceLimitsServerNotices, @@ -43,13 +43,13 @@ class TestResourceLimitsServerNotices(unittest.TestCase): self._send_notice = self._rlsn._server_notices_manager.send_notice - self._rlsn._limit_usage_by_mau = True + self.hs.config.limit_usage_by_mau = True self.user_id = "@user_id:test" - self.server_notices_mxid = "@server:test" - self.server_notices_mxid_display_name = None - self.server_notices_mxid_avatar_url = None - self.server_notices_room_name = "Server Notices" + # self.server_notices_mxid = "@server:test" + # self.server_notices_mxid_display_name = None + # self.server_notices_mxid_avatar_url = None + # self.server_notices_room_name = "Server Notices" self._rlsn._server_notices_manager.get_notice_room_for_user = Mock( returnValue="" @@ -61,14 +61,14 @@ class TestResourceLimitsServerNotices(unittest.TestCase): def test_maybe_send_server_notice_to_user_flag_off(self): """Tests cases where the flags indicate nothing to do""" # test hs disabled case - self._hs_disabled = True + self.hs.config.hs_disabled = True yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) self._send_notice.assert_not_called() # Test when mau limiting disabled - self._hs_disabled = False - self._rlsn._limit_usage_by_mau = False + self.hs.config.hs_disabled = False + self.hs.limit_usage_by_mau = False yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) self._send_notice.assert_not_called() @@ -77,7 +77,7 @@ class TestResourceLimitsServerNotices(unittest.TestCase): def test_maybe_send_server_notice_to_user_remove_blocked_notice(self): """Test when user has blocked notice, but should have it removed""" - self._rlsn.auth.check_auth_blocking = Mock() + self._rlsn._auth.check_auth_blocking = Mock() mock_event = Mock(type=EventTypes.ServerNoticeLimitReached) self._rlsn._store.get_events = Mock(return_value=defer.succeed( {"123": mock_event} @@ -90,8 +90,8 @@ class TestResourceLimitsServerNotices(unittest.TestCase): @defer.inlineCallbacks def test_maybe_send_server_notice_to_user_remove_blocked_notice_noop(self): """Test when user has blocked notice, but notice ought to be there (NOOP)""" - self._rlsn.auth.check_auth_blocking = Mock( - side_effect=AuthError(403, 'foo') + self._rlsn._auth.check_auth_blocking = Mock( + side_effect=ResourceLimitError(403, 'foo') ) mock_event = Mock(type=EventTypes.ServerNoticeLimitReached) @@ -106,7 +106,9 @@ class TestResourceLimitsServerNotices(unittest.TestCase): def test_maybe_send_server_notice_to_user_add_blocked_notice(self): """Test when user does not have blocked notice, but should have one""" - self._rlsn.auth.check_auth_blocking = Mock(side_effect=AuthError(403, 'foo')) + self._rlsn._auth.check_auth_blocking = Mock( + side_effect=ResourceLimitError(403, 'foo') + ) yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) # Would be better to check contents, but 2 calls == set blocking event @@ -116,7 +118,7 @@ class 
TestResourceLimitsServerNotices(unittest.TestCase): def test_maybe_send_server_notice_to_user_add_blocked_notice_noop(self): """Test when user does not have blocked notice, nor should they (NOOP)""" - self._rlsn.auth.check_auth_blocking = Mock() + self._rlsn._auth.check_auth_blocking = Mock() yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) @@ -129,7 +131,7 @@ class TestResourceLimitsServerNotices(unittest.TestCase): happen - but ... """ - self._rlsn.auth.check_auth_blocking = Mock() + self._rlsn._auth.check_auth_blocking = Mock() self._rlsn._store.user_last_seen_monthly_active = Mock( return_value=defer.succeed(None) ) -- cgit 1.5.1 From 91cdb6de08aa20f2bc7f8df906eb5b56df387309 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 17 Aug 2018 15:24:16 +0100 Subject: Call UserDirectoryHandler methods directly Turns out that the user directory handling is fairly racey as a bunch of stuff assumes that the processing happens on master, which it doesn't when there is a synapse.app.user_dir worker. So lets just call the function directly until we actually get round to fixing it, since it doesn't make the situation any worse. --- synapse/app/event_creator.py | 2 + synapse/handlers/profile.py | 26 +++--------- synapse/replication/http/__init__.py | 3 +- synapse/replication/http/profile.py | 81 ------------------------------------ 4 files changed, 8 insertions(+), 104 deletions(-) delete mode 100644 synapse/replication/http/profile.py (limited to 'synapse') diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index 2c9a735859..a2bdaf2a2c 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -58,6 +58,7 @@ from synapse.rest.client.v1.room import ( ) from synapse.server import HomeServer from synapse.storage.engines import create_engine +from synapse.storage.user_directory import UserDirectoryStore from synapse.util.httpresourcetree import create_resource_tree from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole @@ -67,6 +68,7 @@ logger = logging.getLogger("synapse.app.event_creator") class EventCreatorSlavedStore( + UserDirectoryStore, DirectoryStore, SlavedTransactionStore, SlavedProfileStore, diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 8b349f6ad6..3e1d95d5e5 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -25,7 +25,6 @@ from synapse.api.errors import ( SynapseError, ) from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.replication.http.profile import ReplicationHandleProfileChangeRestServlet from synapse.types import UserID, get_domain_from_id from ._base import BaseHandler @@ -47,10 +46,6 @@ class WorkerProfileHandler(BaseHandler): self.user_directory_handler = hs.get_user_directory_handler() - self._notify_master_profile_change = ( - ReplicationHandleProfileChangeRestServlet.make_client(hs) - ) - @defer.inlineCallbacks def get_profile(self, user_id): target_user = UserID.from_string(user_id) @@ -166,16 +161,10 @@ class WorkerProfileHandler(BaseHandler): ) if self.hs.config.user_directory_search_all_users: - if self.hs.config.worker_app is None: - profile = yield self.store.get_profileinfo(target_user.localpart) - yield self.user_directory_handler.handle_local_profile_change( - target_user.to_string(), profile - ) - else: - yield self._notify_master_profile_change( - requester=requester, - user_id=target_user.to_string(), - ) + profile = yield 
self.store.get_profileinfo(target_user.localpart) + yield self.user_directory_handler.handle_local_profile_change( + target_user.to_string(), profile + ) yield self._update_join_states(requester, target_user) @@ -225,16 +214,11 @@ class WorkerProfileHandler(BaseHandler): target_user.localpart, new_avatar_url ) - if self.hs.config.worker_app is None: + if self.hs.config.user_directory_search_all_users: profile = yield self.store.get_profileinfo(target_user.localpart) yield self.user_directory_handler.handle_local_profile_change( target_user.to_string(), profile ) - else: - yield self._notify_master_profile_change( - requester=requester, - user_id=target_user.to_string(), - ) yield self._update_join_states(requester, target_user) diff --git a/synapse/replication/http/__init__.py b/synapse/replication/http/__init__.py index d8487df614..19f214281e 100644 --- a/synapse/replication/http/__init__.py +++ b/synapse/replication/http/__init__.py @@ -14,7 +14,7 @@ # limitations under the License. from synapse.http.server import JsonResource -from synapse.replication.http import federation, membership, profile, send_event +from synapse.replication.http import federation, membership, send_event REPLICATION_PREFIX = "/_synapse/replication" @@ -27,5 +27,4 @@ class ReplicationRestResource(JsonResource): def register_servlets(self, hs): send_event.register_servlets(hs, self) membership.register_servlets(hs, self) - profile.register_servlets(hs, self) federation.register_servlets(hs, self) diff --git a/synapse/replication/http/profile.py b/synapse/replication/http/profile.py deleted file mode 100644 index aa700c2434..0000000000 --- a/synapse/replication/http/profile.py +++ /dev/null @@ -1,81 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging - -from twisted.internet import defer - -from synapse.http.servlet import parse_json_object_from_request -from synapse.replication.http._base import ReplicationEndpoint -from synapse.types import Requester, UserID - -logger = logging.getLogger(__name__) - - -class ReplicationHandleProfileChangeRestServlet(ReplicationEndpoint): - """Notifies that a users profile has changed - - Request format: - - POST /_synapse/replication/profile_changed/:user_id - - { - "requester": ... 
- } - """ - - NAME = "profile_changed" - PATH_ARGS = ("user_id",) - - def __init__(self, hs): - super(ReplicationHandleProfileChangeRestServlet, self).__init__(hs) - - self.user_directory_handler = hs.get_user_directory_handler() - self.store = hs.get_datastore() - self.clock = hs.get_clock() - - @staticmethod - def _serialize_payload(requester, user_id): - """ - Args: - requester (Requester) - user_id (str) - """ - - return { - "requester": requester.serialize(), - } - - @defer.inlineCallbacks - def _handle_request(self, request, user_id): - content = parse_json_object_from_request(request) - - requester = Requester.deserialize(self.store, content["requester"]) - - if requester.user: - request.authenticated_entity = requester.user.to_string() - - target_user = UserID.from_string(user_id) - - profile = yield self.store.get_profileinfo(target_user.localpart) - yield self.user_directory_handler.handle_local_profile_change( - user_id, profile - ) - - defer.returnValue((200, {})) - - -def register_servlets(hs, http_server): - ReplicationHandleProfileChangeRestServlet(hs).register(http_server) -- cgit 1.5.1 From ab822a2d1fa30514185aa257b5c6a0af39a6f5f0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 17 Aug 2018 15:30:31 +0100 Subject: Add some fixmes --- synapse/app/event_creator.py | 2 ++ synapse/handlers/user_directory.py | 4 ++++ 2 files changed, 6 insertions(+) (limited to 'synapse') diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index a2bdaf2a2c..a34c89fa99 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -68,6 +68,8 @@ logger = logging.getLogger("synapse.app.event_creator") class EventCreatorSlavedStore( + # FIXME(#3714): We need to add UserDirectoryStore as we write directly + # rather than going via the correct worker. UserDirectoryStore, DirectoryStore, SlavedTransactionStore, diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 37dda64587..d8413d6aa7 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -119,6 +119,8 @@ class UserDirectoryHandler(object): """Called to update index of our local user profiles when they change irrespective of any rooms the user may be in. """ + # FIXME(#3714): We should probably do this in the same worker as all + # the other changes. yield self.store.update_profile_in_user_dir( user_id, profile.display_name, profile.avatar_url, None, ) @@ -127,6 +129,8 @@ class UserDirectoryHandler(object): def handle_user_deactivated(self, user_id): """Called when a user ID is deactivated """ + # FIXME(#3714): We should probably do this in the same worker as all + # the other changes. 
yield self.store.remove_from_user_dir(user_id)
yield self.store.remove_from_user_in_public_room(user_id)
-- cgit 1.5.1
From ba1fbf7d5bd8807e892de6de4bc82cb5e49581bd Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Sat, 18 Aug 2018 12:31:08 +0100
Subject: special case server_notices_mxid
---
synapse/server_notices/resource_limits_server_notices.py | 2 +-
tests/api/test_auth.py | 8 ++++++++
2 files changed, 9 insertions(+), 1 deletion(-)
(limited to 'synapse')
diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py
index 5987e448b2..84b91aeb5d 100644
--- a/synapse/server_notices/resource_limits_server_notices.py
+++ b/synapse/server_notices/resource_limits_server_notices.py
@@ -173,7 +173,7 @@ class ResourceLimitsServerNotices(object):
referenced_events = pinned_state_event.content.get('pinned')
events = yield self._store.get_events(referenced_events)
- for event_id, event in events.iteritems():
+ for event_id, event in iteritems(events):
if event.type == EventTypes.ServerNoticeLimitReached:
currently_blocked = True
# remove event in case we need to disable blocking later on.
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py
index 022d81ce3e..c4cbff4e8d 100644
--- a/tests/api/test_auth.py
+++ b/tests/api/test_auth.py
@@ -476,3 +476,11 @@ class AuthTestCase(unittest.TestCase):
self.assertEquals(e.exception.admin_uri, self.hs.config.admin_uri)
self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEED)
self.assertEquals(e.exception.code, 403)
+
+ @defer.inlineCallbacks
+ def test_server_notices_mxid_special_cased(self):
+ self.hs.config.hs_disabled = True
+ user = "@user:server"
+ self.hs.config.server_notices_mxid = user
+ self.hs.config.hs_disabled_message = "Reason for being disabled"
+ yield self.auth.check_auth_blocking(user)
-- cgit 1.5.1
From c5171bf171ae28596c4e101e9dcc61bad7bcae63 Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Sat, 18 Aug 2018 12:33:07 +0100
Subject: special case server_notices_mxid
---
synapse/api/auth.py | 6 ++++++
1 file changed, 6 insertions(+)
(limited to 'synapse')
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 022211e34e..55384d6ffe 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -783,6 +783,12 @@ class Auth(object):
user_id(str|None): If present, checks for presence against existing
MAU cohort
"""
+
+ # Never fail an auth check for the server notices user,
+ # who would otherwise be unable to send notices while event creation is blocked
+ if user_id == self.hs.config.server_notices_mxid:
+ return
+
if self.hs.config.hs_disabled:
raise ResourceLimitError(
403, self.hs.config.hs_disabled_message,
-- cgit 1.5.1
From e07970165f852ccbc4542f1aaf0fd1b2bc54b973 Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Sat, 18 Aug 2018 14:39:45 +0100
Subject: rename error code
---
synapse/api/auth.py | 4 ++--
synapse/api/errors.py | 4 ++--
tests/api/test_auth.py | 4 ++--
tests/handlers/test_sync.py | 4 ++--
4 files changed, 8 insertions(+), 8 deletions(-)
(limited to 'synapse')
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 55384d6ffe..4207a48afd 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -792,7 +792,7 @@ class Auth(object):
if self.hs.config.hs_disabled:
raise ResourceLimitError(
403, self.hs.config.hs_disabled_message,
- errcode=Codes.RESOURCE_LIMIT_EXCEED,
+ errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
admin_uri=self.hs.config.admin_uri,
limit_type=self.hs.config.hs_disabled_limit_type
)
@@ -809,6 +809,6 @@ class Auth(object):
403, "Monthly Active User Limit Exceeded", admin_uri=self.hs.config.admin_uri, - errcode=Codes.RESOURCE_LIMIT_EXCEED, + errcode=Codes.RESOURCE_LIMIT_EXCEEDED, limit_type="monthly_active_user" ) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index e26001ab12..c4ddba9889 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -56,7 +56,7 @@ class Codes(object): SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED" CONSENT_NOT_GIVEN = "M_CONSENT_NOT_GIVEN" CANNOT_LEAVE_SERVER_NOTICE_ROOM = "M_CANNOT_LEAVE_SERVER_NOTICE_ROOM" - RESOURCE_LIMIT_EXCEED = "M_RESOURCE_LIMIT_EXCEED" + RESOURCE_LIMIT_EXCEEDED = "M_RESOURCE_LIMIT_EXCEEDED" UNSUPPORTED_ROOM_VERSION = "M_UNSUPPORTED_ROOM_VERSION" INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION" @@ -238,7 +238,7 @@ class ResourceLimitError(SynapseError): """ def __init__( self, code, msg, - errcode=Codes.RESOURCE_LIMIT_EXCEED, + errcode=Codes.RESOURCE_LIMIT_EXCEEDED, admin_uri=None, limit_type=None, ): diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index c4cbff4e8d..ed960090c4 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -458,7 +458,7 @@ class AuthTestCase(unittest.TestCase): with self.assertRaises(ResourceLimitError) as e: yield self.auth.check_auth_blocking() self.assertEquals(e.exception.admin_uri, self.hs.config.admin_uri) - self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEED) + self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.assertEquals(e.exception.code, 403) # Ensure does not throw an error @@ -474,7 +474,7 @@ class AuthTestCase(unittest.TestCase): with self.assertRaises(ResourceLimitError) as e: yield self.auth.check_auth_blocking() self.assertEquals(e.exception.admin_uri, self.hs.config.admin_uri) - self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEED) + self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.assertEquals(e.exception.code, 403) @defer.inlineCallbacks diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index a01ab471f5..31f54bbd7d 100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py @@ -51,7 +51,7 @@ class SyncTestCase(tests.unittest.TestCase): self.hs.config.hs_disabled = True with self.assertRaises(ResourceLimitError) as e: yield self.sync_handler.wait_for_sync_for_user(sync_config) - self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEED) + self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.hs.config.hs_disabled = False @@ -59,7 +59,7 @@ class SyncTestCase(tests.unittest.TestCase): with self.assertRaises(ResourceLimitError) as e: yield self.sync_handler.wait_for_sync_for_user(sync_config) - self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEED) + self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) def _generate_sync_config(self, user_id): return SyncConfig( -- cgit 1.5.1 From e21c368b8b7c62b2f626ff6c739ea33c3c2e73bb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 20 Aug 2018 13:54:51 +0100 Subject: Revert spurious change --- synapse/handlers/room_member.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index b6010bb41e..b8e1af580b 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -341,9 +341,9 @@ class RoomMemberHandler(object): prev_events_and_hashes = yield self.store.get_prev_events_for_room( room_id, ) - latest_event_ids = [ + 
latest_event_ids = ( event_id for (event_id, _, _) in prev_events_and_hashes - ] + ) current_state_ids = yield self.state_handler.get_current_state_ids( room_id, latest_event_ids=latest_event_ids, -- cgit 1.5.1 From 8dee601054089311b81efd6239b458401da063e3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 20 Aug 2018 14:21:12 +0100 Subject: Remove redundant room_version checks --- synapse/handlers/federation.py | 2 +- synapse/state/__init__.py | 17 +++-------------- 2 files changed, 4 insertions(+), 15 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 75a819dd11..14a10c1229 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1814,7 +1814,7 @@ class FederationHandler(BaseHandler): room_version = yield self.store.get_room_version(event.room_id) - new_state = yield self.state_handler.resolve_events( + new_state = self.state_handler.resolve_events( room_version, [list(local_view.values()), list(remote_view.values())], event diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index f1c26d1071..a2992fd953 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -262,14 +262,9 @@ class StateHandler(object): defer.returnValue(context) logger.debug("calling resolve_state_groups from compute_event_context") - if event.type == EventTypes.Create: - room_version = event.content.get("room_version", RoomVersions.V1) - else: - room_version = None entry = yield self.resolve_state_groups_for_events( event.room_id, [e for e, _ in event.prev_events], - explicit_room_version=room_version, ) prev_state_ids = entry.state @@ -337,8 +332,7 @@ class StateHandler(object): defer.returnValue(context) @defer.inlineCallbacks - def resolve_state_groups_for_events(self, room_id, event_ids, - explicit_room_version=None): + def resolve_state_groups_for_events(self, room_id, event_ids): """ Given a list of event_ids this method fetches the state at each event, resolves conflicts between them and returns them. 
@@ -378,9 +372,7 @@ class StateHandler(object): delta_ids=delta_ids, )) - room_version = explicit_room_version - if not room_version: - room_version = yield self.store.get_room_version(room_id) + room_version = yield self.store.get_room_version(room_id) result = yield self._state_resolution_handler.resolve_state_groups( room_id, room_version, state_groups_ids, None, @@ -393,7 +385,6 @@ class StateHandler(object): ev_ids, get_prev_content=False, check_redacted=False, ) - @defer.inlineCallbacks def resolve_events(self, room_version, state_sets, event): logger.info( "Resolving state for %s with %d groups", event.room_id, len(state_sets) @@ -409,8 +400,6 @@ class StateHandler(object): for ev in st } - room_version = yield self.store.get_room_version(event.room_id) - with Measure(self.clock, "state._resolve_events"): new_state = resolve_events_with_state_map( room_version, state_set_ids, state_map, @@ -420,7 +409,7 @@ class StateHandler(object): key: state_map[ev_id] for key, ev_id in iteritems(new_state) } - defer.returnValue(new_state) + return new_state class StateResolutionHandler(object): -- cgit 1.5.1 From 1058d141272f17435b3548a4b12abb51a6372383 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 20 Aug 2018 17:16:58 +0100 Subject: Make the in flight background process metrics thread safe --- synapse/metrics/background_process_metrics.py | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) (limited to 'synapse') diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index ce678d5f75..264fb93892 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -14,6 +14,7 @@ # limitations under the License. import six +import threading from prometheus_client.core import REGISTRY, Counter, GaugeMetricFamily @@ -78,6 +79,9 @@ _background_process_counts = dict() # type: dict[str, int] # of process descriptions that no longer have any active processes. _background_processes = dict() # type: dict[str, set[_BackgroundProcess]] +# A lock that covers the above dicts +_bg_metrics_lock = threading.Lock() + class _Collector(object): """A custom metrics collector for the background process metrics. 
@@ -92,7 +96,11 @@ class _Collector(object): labels=["name"], ) - for desc, processes in six.iteritems(_background_processes): + # We copy the dict so that it doesn't change from underneath us + with _bg_metrics_lock: + _background_processes_copy = dict(_background_processes) + + for desc, processes in six.iteritems(_background_processes_copy): background_process_in_flight_count.add_metric( (desc,), len(processes), ) @@ -167,19 +175,26 @@ def run_as_background_process(desc, func, *args, **kwargs): """ @defer.inlineCallbacks def run(): - count = _background_process_counts.get(desc, 0) - _background_process_counts[desc] = count + 1 + with _bg_metrics_lock: + count = _background_process_counts.get(desc, 0) + _background_process_counts[desc] = count + 1 + _background_process_start_count.labels(desc).inc() with LoggingContext(desc) as context: context.request = "%s-%i" % (desc, count) proc = _BackgroundProcess(desc, context) - _background_processes.setdefault(desc, set()).add(proc) + + with _bg_metrics_lock: + _background_processes.setdefault(desc, set()).add(proc) + try: yield func(*args, **kwargs) finally: proc.update_metrics() - _background_processes[desc].remove(proc) + + with _bg_metrics_lock: + _background_processes[desc].remove(proc) with PreserveLoggingContext(): return run() -- cgit 1.5.1 From b01a75549893bf4f8f986a74a499e7cb6131868c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 20 Aug 2018 17:27:52 +0100 Subject: Make the in flight requests metrics thread safe --- synapse/http/request_metrics.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 588e280571..284d5e05de 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -15,6 +15,7 @@ # limitations under the License. import logging +import threading from prometheus_client.core import Counter, Histogram @@ -111,6 +112,9 @@ in_flight_requests_db_sched_duration = Counter( # The set of all in flight requests, set[RequestMetrics] _in_flight_requests = set() +# Protects the _in_flight_requests set from concurrent accesss +_in_flight_reqeuests_lock = threading.Lock() + def _get_in_flight_counts(): """Returns a count of all in flight requests by (method, server_name) @@ -120,7 +124,8 @@ def _get_in_flight_counts(): """ # Cast to a list to prevent it changing while the Prometheus # thread is collecting metrics - reqs = list(_in_flight_requests) + with _in_flight_reqeuests_lock: + reqs = list(_in_flight_requests) for rm in reqs: rm.update_metrics() @@ -154,10 +159,12 @@ class RequestMetrics(object): # to the "in flight" metrics. 
self._request_stats = self.start_context.get_resource_usage() - _in_flight_requests.add(self) + with _in_flight_reqeuests_lock: + _in_flight_requests.add(self) def stop(self, time_sec, request): - _in_flight_requests.discard(self) + with _in_flight_reqeuests_lock: + _in_flight_requests.discard(self) context = LoggingContext.current_context() -- cgit 1.5.1 From f89f6b7c09eb8607c6744db839151452ca44c18e Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Mon, 20 Aug 2018 18:25:54 +0100 Subject: Allow guests to access /rooms/:roomId/event/:eventId --- synapse/rest/client/v1/room.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index fcc1091760..976d98387d 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -531,7 +531,7 @@ class RoomEventServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request, room_id, event_id): - requester = yield self.auth.get_user_by_req(request) + requester = yield self.auth.get_user_by_req(request, allow_guest=True) event = yield self.event_handler.get_event(requester.user, room_id, event_id) time_now = self.clock.time_msec() -- cgit 1.5.1 From 3f6762f0bbeb611714ba6c9c4329ea7d5e46cbda Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 21 Aug 2018 09:38:38 +0100 Subject: isort --- synapse/metrics/background_process_metrics.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 264fb93892..167167be0a 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -13,9 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six import threading +import six + from prometheus_client.core import REGISTRY, Counter, GaugeMetricFamily from twisted.internet import defer -- cgit 1.5.1 From 808d8e06aa6a3bdd9016f82d98088111e5741f4c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 21 Aug 2018 11:09:46 +0100 Subject: Don't log exceptions when failing to fetch server keys Not being able to resolve or connect to remote servers is an expected error, so we shouldn't log at ERROR with stacktraces. --- synapse/crypto/keyclient.py | 8 ++++++-- synapse/federation/transport/server.py | 4 ++-- 2 files changed, 8 insertions(+), 4 deletions(-) (limited to 'synapse') diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py index c20a32096a..e94400b8e2 100644 --- a/synapse/crypto/keyclient.py +++ b/synapse/crypto/keyclient.py @@ -18,7 +18,9 @@ import logging from canonicaljson import json from twisted.internet import defer, reactor +from twisted.internet.error import ConnectError from twisted.internet.protocol import Factory +from twisted.names.error import DomainError from twisted.web.http import HTTPClient from synapse.http.endpoint import matrix_federation_endpoint @@ -47,12 +49,14 @@ def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1): server_response, server_certificate = yield protocol.remote_key defer.returnValue((server_response, server_certificate)) except SynapseKeyClientError as e: - logger.exception("Error getting key for %r" % (server_name,)) + logger.warn("Error getting key for %r: %s", server_name, e) if e.status.startswith("4"): # Don't retry for 4xx responses. 
raise IOError("Cannot get key for %r" % server_name) + except (ConnectError, DomainError) as e: + logger.warn("Error getting key for %r: %s", server_name, e) except Exception as e: - logger.exception(e) + logger.exception("Error getting key for %r", server_name) raise IOError("Cannot get key for %r" % server_name) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 77969a4f38..33a37d4498 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -261,10 +261,10 @@ class BaseFederationServlet(object): except NoAuthenticationError: origin = None if self.REQUIRE_AUTH: - logger.exception("authenticate_request failed") + logger.warn("authenticate_request failed") raise except Exception: - logger.exception("authenticate_request failed") + logger.warn("authenticate_request failed") raise if origin: -- cgit 1.5.1 From c2c153dd3b97ec98ec7ef295deeb466d72027a01 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 21 Aug 2018 11:41:07 +0100 Subject: Log more detail when we fail to authenticate request --- synapse/federation/transport/server.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 33a37d4498..7a993fd1cf 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -261,10 +261,10 @@ class BaseFederationServlet(object): except NoAuthenticationError: origin = None if self.REQUIRE_AUTH: - logger.warn("authenticate_request failed") + logger.warn("authenticate_request failed: missing authentication") raise - except Exception: - logger.warn("authenticate_request failed") + except Exception as e: + logger.warn("authenticate_request failed: %s", e) raise if origin: -- cgit 1.5.1 From 8550a7e9c2fcf9e46c717127d14c79010350a998 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Tue, 21 Aug 2018 10:38:00 -0400 Subject: allow auth_data to be any JSON instead of a string --- synapse/storage/e2e_room_keys.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 089989fcfa..c7b1fad21e 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -15,6 +15,7 @@ from twisted.internet import defer from synapse.api.errors import StoreError +import simplejson as json from ._base import SQLBaseStore @@ -215,7 +216,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): else: this_version = version - return self._simple_select_one_txn( + result = self._simple_select_one_txn( txn, table="e2e_room_keys_versions", keyvalues={ @@ -228,6 +229,8 @@ class EndToEndRoomKeyStore(SQLBaseStore): "auth_data", ), ) + result["auth_data"] = json.loads(result["auth_data"]) + return result return self.runInteraction( "get_e2e_room_keys_version_info", @@ -264,7 +267,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): "user_id": user_id, "version": new_version, "algorithm": info["algorithm"], - "auth_data": info["auth_data"], + "auth_data": json.dumps(info["auth_data"]), }, ) -- cgit 1.5.1 From cd6937fb266b72080f73a7a41da761f7f8775d96 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 21 Aug 2018 16:28:10 +0100 Subject: Fix typo --- synapse/http/request_metrics.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'synapse') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 
284d5e05de..72c2654678 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -113,7 +113,7 @@ in_flight_requests_db_sched_duration = Counter( _in_flight_requests = set() # Protects the _in_flight_requests set from concurrent accesss -_in_flight_reqeuests_lock = threading.Lock() +_in_flight_requests_lock = threading.Lock() def _get_in_flight_counts(): @@ -124,7 +124,7 @@ def _get_in_flight_counts(): """ # Cast to a list to prevent it changing while the Prometheus # thread is collecting metrics - with _in_flight_reqeuests_lock: + with _in_flight_requests_lock: reqs = list(_in_flight_requests) for rm in reqs: @@ -159,11 +159,11 @@ class RequestMetrics(object): # to the "in flight" metrics. self._request_stats = self.start_context.get_resource_usage() - with _in_flight_reqeuests_lock: + with _in_flight_requests_lock: _in_flight_requests.add(self) def stop(self, time_sec, request): - with _in_flight_reqeuests_lock: + with _in_flight_requests_lock: _in_flight_requests.discard(self) context = LoggingContext.current_context() -- cgit 1.5.1 From 42a394caa2884096e5afe1f1c2c75680792769d8 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Tue, 21 Aug 2018 14:51:34 -0400 Subject: allow session_data to be any JSON instead of just a string --- synapse/storage/e2e_room_keys.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index c7b1fad21e..b695570a7b 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -58,6 +58,8 @@ class EndToEndRoomKeyStore(SQLBaseStore): desc="get_e2e_room_key", ) + row["session_data"] = json.loads(row["session_data"]); + defer.returnValue(row) @defer.inlineCallbacks @@ -87,7 +89,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): "first_message_index": room_key['first_message_index'], "forwarded_count": room_key['forwarded_count'], "is_verified": room_key['is_verified'], - "session_data": room_key['session_data'], + "session_data": json.dumps(room_key['session_data']), }, lock=False, ) @@ -145,7 +147,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): "first_message_index": row["first_message_index"], "forwarded_count": row["forwarded_count"], "is_verified": row["is_verified"], - "session_data": row["session_data"], + "session_data": json.loads(row["session_data"]), } defer.returnValue(sessions) -- cgit 1.5.1 From bb81e78ec6c05edc95b25a954bdf1cf688d4d652 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 22 Aug 2018 00:56:37 +0200 Subject: Split the state_group_cache in two (#3726) Splits the state_group_cache in two. One half contains normal state events; the other contains member events. The idea is that the lazyloading common case of: "I want a subset of member events plus all of the other state" can be accomplished efficiently by splitting the cache into two, and asking for "all events" from the non-members cache, and "just these keys" from the members cache. This means we can avoid having to make DictionaryCache aware of these sort of complicated queries, whilst letting LL requests benefit from the caching. Previously we were unable to sensibly use the caching and had to pull all state from the DB irrespective of the filtering, which made things slow. Hopefully fixes https://github.com/matrix-org/synapse/issues/3720. 
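To make the new access pattern concrete: the lazy-loading query "all non-member state, plus this specific subset of members" becomes one lookup against each cache followed by a merge. Below is a minimal sketch of that merge, with plain dicts standing in for DictionaryCache and every name invented for illustration (real cache entries also track whether the cached state is complete, which is elided here):

    MEMBER = "m.room.member"

    # Two caches keyed by state group: member events in one, all other
    # state in the other.
    members_cache = {
        42: {(MEMBER, "@alice:hs"): "$m1", (MEMBER, "@bob:hs"): "$m2"},
    }
    non_members_cache = {
        42: {("m.room.create", ""): "$create", ("m.room.name", ""): "$name"},
    }

    def get_filtered_state(group, wanted_members):
        """All non-member state, but only the requested member events."""
        state = dict(non_members_cache[group])   # "give me everything"
        state.update(
            (key, event_id)
            for key, event_id in members_cache[group].items()
            if key[1] in wanted_members           # "just these keys"
        )
        return state

    # A lazy-loading /sync that only needs alice's membership:
    print(get_filtered_state(42, {"@alice:hs"}))

Each half of the query is one that DictionaryCache already answers efficiently, so neither cache needs to understand the combined filter.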
--- changelog.d/3726.misc | 1 + synapse/storage/state.py | 158 +++++++++++++++++++++++++++++++++++++++----- tests/storage/test_state.py | 105 ++++++++++++++++++++++++++--- 3 files changed, 236 insertions(+), 28 deletions(-) create mode 100644 changelog.d/3726.misc (limited to 'synapse') diff --git a/changelog.d/3726.misc b/changelog.d/3726.misc new file mode 100644 index 0000000000..c4f66ec998 --- /dev/null +++ b/changelog.d/3726.misc @@ -0,0 +1 @@ +Split the state_group_cache into member and non-member state events (and so speed up LL /sync) diff --git a/synapse/storage/state.py b/synapse/storage/state.py index dd03c4168b..4b971efdba 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -60,8 +60,43 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): def __init__(self, db_conn, hs): super(StateGroupWorkerStore, self).__init__(db_conn, hs) + # Originally the state store used a single DictionaryCache to cache the + # event IDs for the state types in a given state group to avoid hammering + # on the state_group* tables. + # + # The point of using a DictionaryCache is that it can cache a subset + # of the state events for a given state group (i.e. a subset of the keys for a + # given dict which is an entry in the cache for a given state group ID). + # + # However, this poses problems when performing complicated queries + # on the store - for instance: "give me all the state for this group, but + # limit members to this subset of users", as DictionaryCache's API isn't + # rich enough to say "please cache any of these fields, apart from this subset". + # This is problematic when lazy loading members, which requires this behaviour, + # as without it the cache has no choice but to speculatively load all + # state events for the group, which negates the efficiency being sought. + # + # Rather than overcomplicating DictionaryCache's API, we instead split the + # state_group_cache into two halves - one for tracking non-member events, + # and the other for tracking member_events. This means that lazy loading + # queries can be made in a cache-friendly manner by querying both caches + # separately and then merging the result. So for the example above, you + # would query the members cache for a specific subset of state keys + # (which DictionaryCache will handle efficiently and fine) and the non-members + # cache for all state (which DictionaryCache will similarly handle fine) + # and then just merge the results together. + # + # We size the non-members cache to be smaller than the members cache as the + # vast majority of state in Matrix (today) is member events. + self._state_group_cache = DictionaryCache( - "*stateGroupCache*", 500000 * get_cache_factor_for("stateGroupCache") + "*stateGroupCache*", + # TODO: this hasn't been tuned yet + 50000 * get_cache_factor_for("stateGroupCache") + ) + self._state_group_members_cache = DictionaryCache( + "*stateGroupMembersCache*", + 500000 * get_cache_factor_for("stateGroupMembersCache") ) @defer.inlineCallbacks @@ -275,7 +310,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): }) @defer.inlineCallbacks - def _get_state_groups_from_groups(self, groups, types): + def _get_state_groups_from_groups(self, groups, types, members=None): """Returns the state groups for a given set of groups, filtering on types of state events. 
@@ -284,6 +319,9 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): types (Iterable[str, str|None]|None): list of 2-tuples of the form (`type`, `state_key`), where a `state_key` of `None` matches all state_keys for the `type`. If None, all types are returned. + members (bool|None): If not None, then, in addition to any filtering + implied by types, the results are also filtered to only include + member events (if True), or to exclude member events (if False) Returns: dictionary state_group -> (dict of (type, state_key) -> event id) @@ -294,14 +332,14 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): for chunk in chunks: res = yield self.runInteraction( "_get_state_groups_from_groups", - self._get_state_groups_from_groups_txn, chunk, types, + self._get_state_groups_from_groups_txn, chunk, types, members, ) results.update(res) defer.returnValue(results) def _get_state_groups_from_groups_txn( - self, txn, groups, types=None, + self, txn, groups, types=None, members=None, ): results = {group: {} for group in groups} @@ -339,6 +377,11 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): %s """) + if members is True: + sql += " AND type = '%s'" % (EventTypes.Member,) + elif members is False: + sql += " AND type <> '%s'" % (EventTypes.Member,) + # Turns out that postgres doesn't like doing a list of OR's and # is about 1000x slower, so we just issue a query for each specific # type seperately. @@ -386,6 +429,11 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): else: where_clause = "" + if members is True: + where_clause += " AND type = '%s'" % EventTypes.Member + elif members is False: + where_clause += " AND type <> '%s'" % EventTypes.Member + # We don't use WITH RECURSIVE on sqlite3 as there are distributions # that ship with an sqlite3 version that doesn't support it (e.g. wheezy) for group in groups: @@ -580,10 +628,11 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): defer.returnValue({row["event_id"]: row["state_group"] for row in rows}) - def _get_some_state_from_cache(self, group, types, filtered_types=None): + def _get_some_state_from_cache(self, cache, group, types, filtered_types=None): """Checks if group is in cache. See `_get_state_for_groups` Args: + cache(DictionaryCache): the state group cache to use group(int): The state group to lookup types(list[str, str|None]): List of 2-tuples of the form (`type`, `state_key`), where a `state_key` of `None` matches all @@ -597,11 +646,11 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): requests state from the cache, if False we need to query the DB for the missing state. """ - is_all, known_absent, state_dict_ids = self._state_group_cache.get(group) + is_all, known_absent, state_dict_ids = cache.get(group) type_to_key = {} - # tracks whether any of ourrequested types are missing from the cache + # tracks whether any of our requested types are missing from the cache missing_types = False for typ, state_key in types: @@ -648,7 +697,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): if include(k[0], k[1]) }, got_all - def _get_all_state_from_cache(self, group): + def _get_all_state_from_cache(self, cache, group): """Checks if group is in cache. See `_get_state_for_groups` Returns 2-tuple (`state_dict`, `got_all`). `got_all` is a bool @@ -656,9 +705,10 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): cache, if False we need to query the DB for the missing state. 
Args: + cache(DictionaryCache): the state group cache to use group: The state group to lookup """ - is_all, _, state_dict_ids = self._state_group_cache.get(group) + is_all, _, state_dict_ids = cache.get(group) return state_dict_ids, is_all @@ -681,6 +731,62 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): list of event types. Other types of events are returned unfiltered. If None, `types` filtering is applied to all events. + Returns: + Deferred[dict[int, dict[(type, state_key), EventBase]]] + a dictionary mapping from state group to state dictionary. + """ + if types is not None: + non_member_types = [t for t in types if t[0] != EventTypes.Member] + + if filtered_types is not None and EventTypes.Member not in filtered_types: + # we want all of the membership events + member_types = None + else: + member_types = [t for t in types if t[0] == EventTypes.Member] + + else: + non_member_types = None + member_types = None + + non_member_state = yield self._get_state_for_groups_using_cache( + groups, self._state_group_cache, non_member_types, filtered_types, + ) + # XXX: we could skip this entirely if member_types is [] + member_state = yield self._get_state_for_groups_using_cache( + # we set filtered_types=None as member_state only ever contain members. + groups, self._state_group_members_cache, member_types, None, + ) + + state = non_member_state + for group in groups: + state[group].update(member_state[group]) + + defer.returnValue(state) + + @defer.inlineCallbacks + def _get_state_for_groups_using_cache( + self, groups, cache, types=None, filtered_types=None + ): + """Gets the state at each of a list of state groups, optionally + filtering by type/state_key, querying from a specific cache. + + Args: + groups (iterable[int]): list of state groups for which we want + to get the state. + cache (DictionaryCache): the cache of group ids to state dicts which + we will pass through - either the normal state cache or the specific + members state cache. + types (None|iterable[(str, None|str)]): + indicates the state type/keys required. If None, the whole + state is fetched and returned. + + Otherwise, each entry should be a `(type, state_key)` tuple to + include in the response. A `state_key` of None is a wildcard + meaning that we require all state with that type. + filtered_types(list[str]|None): Only apply filtering via `types` to this + list of event types. Other types of events are returned unfiltered. + If None, `types` filtering is applied to all events. + Returns: Deferred[dict[int, dict[(type, state_key), EventBase]]] a dictionary mapping from state group to state dictionary. @@ -692,7 +798,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): if types is not None: for group in set(groups): state_dict_ids, got_all = self._get_some_state_from_cache( - group, types, filtered_types + cache, group, types, filtered_types ) results[group] = state_dict_ids @@ -701,7 +807,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): else: for group in set(groups): state_dict_ids, got_all = self._get_all_state_from_cache( - group + cache, group ) results[group] = state_dict_ids @@ -710,8 +816,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): missing_groups.append(group) if missing_groups: - # Okay, so we have some missing_types, lets fetch them. - cache_seq_num = self._state_group_cache.sequence + # Okay, so we have some missing_types, let's fetch them. 
+ cache_seq_num = cache.sequence # the DictionaryCache knows if it has *all* the state, but # does not know if it has all of the keys of a particular type, @@ -725,7 +831,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): types_to_fetch = types group_to_state_dict = yield self._get_state_groups_from_groups( - missing_groups, types_to_fetch + missing_groups, types_to_fetch, cache == self._state_group_members_cache, ) for group, group_state_dict in iteritems(group_to_state_dict): @@ -745,7 +851,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): # update the cache with all the things we fetched from the # database. - self._state_group_cache.update( + cache.update( cache_seq_num, key=group, value=group_state_dict, @@ -847,15 +953,33 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): ], ) - # Prefill the state group cache with this group. + # Prefill the state group caches with this group. # It's fine to use the sequence like this as the state group map # is immutable. (If the map wasn't immutable then this prefill could # race with another update) + + current_member_state_ids = { + s: ev + for (s, ev) in iteritems(current_state_ids) + if s[0] == EventTypes.Member + } + txn.call_after( + self._state_group_members_cache.update, + self._state_group_members_cache.sequence, + key=state_group, + value=dict(current_member_state_ids), + ) + + current_non_member_state_ids = { + s: ev + for (s, ev) in iteritems(current_state_ids) + if s[0] != EventTypes.Member + } txn.call_after( self._state_group_cache.update, self._state_group_cache.sequence, key=state_group, - value=dict(current_state_ids), + value=dict(current_non_member_state_ids), ) return state_group diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index ebfd969b36..d717b9f94e 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -185,6 +185,7 @@ class StateStoreTestCase(tests.unittest.TestCase): # test _get_some_state_from_cache correctly filters out members with types=[] (state_dict, is_all) = yield self.store._get_some_state_from_cache( + self.store._state_group_cache, group, [], filtered_types=[EventTypes.Member] ) @@ -197,8 +198,20 @@ class StateStoreTestCase(tests.unittest.TestCase): state_dict, ) + (state_dict, is_all) = yield self.store._get_some_state_from_cache( + self.store._state_group_members_cache, + group, [], filtered_types=[EventTypes.Member] + ) + + self.assertEqual(is_all, True) + self.assertDictEqual( + {}, + state_dict, + ) + # test _get_some_state_from_cache correctly filters in members with wildcard types (state_dict, is_all) = yield self.store._get_some_state_from_cache( + self.store._state_group_cache, group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member] ) @@ -207,6 +220,18 @@ class StateStoreTestCase(tests.unittest.TestCase): { (e1.type, e1.state_key): e1.event_id, (e2.type, e2.state_key): e2.event_id, + }, + state_dict, + ) + + (state_dict, is_all) = yield self.store._get_some_state_from_cache( + self.store._state_group_members_cache, + group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member] + ) + + self.assertEqual(is_all, True) + self.assertDictEqual( + { (e3.type, e3.state_key): e3.event_id, # e4 is overwritten by e5 (e5.type, e5.state_key): e5.event_id, @@ -216,6 +241,7 @@ class StateStoreTestCase(tests.unittest.TestCase): # test _get_some_state_from_cache correctly filters in members with specific types (state_dict, is_all) = yield self.store._get_some_state_from_cache( + 
self.store._state_group_cache, group, [(EventTypes.Member, e5.state_key)], filtered_types=[EventTypes.Member], @@ -226,6 +252,20 @@ class StateStoreTestCase(tests.unittest.TestCase): { (e1.type, e1.state_key): e1.event_id, (e2.type, e2.state_key): e2.event_id, + }, + state_dict, + ) + + (state_dict, is_all) = yield self.store._get_some_state_from_cache( + self.store._state_group_members_cache, + group, + [(EventTypes.Member, e5.state_key)], + filtered_types=[EventTypes.Member], + ) + + self.assertEqual(is_all, True) + self.assertDictEqual( + { (e5.type, e5.state_key): e5.event_id, }, state_dict, @@ -234,6 +274,7 @@ class StateStoreTestCase(tests.unittest.TestCase): # test _get_some_state_from_cache correctly filters in members with specific types # and no filtered_types (state_dict, is_all) = yield self.store._get_some_state_from_cache( + self.store._state_group_members_cache, group, [(EventTypes.Member, e5.state_key)], filtered_types=None ) @@ -254,9 +295,6 @@ class StateStoreTestCase(tests.unittest.TestCase): { (e1.type, e1.state_key): e1.event_id, (e2.type, e2.state_key): e2.event_id, - (e3.type, e3.state_key): e3.event_id, - # e4 is overwritten by e5 - (e5.type, e5.state_key): e5.event_id, }, ) @@ -269,8 +307,6 @@ class StateStoreTestCase(tests.unittest.TestCase): # list fetched keys so it knows it's partial fetched_keys=( (e1.type, e1.state_key), - (e3.type, e3.state_key), - (e5.type, e5.state_key), ), ) @@ -284,8 +320,6 @@ class StateStoreTestCase(tests.unittest.TestCase): set( [ (e1.type, e1.state_key), - (e3.type, e3.state_key), - (e5.type, e5.state_key), ] ), ) @@ -293,8 +327,6 @@ class StateStoreTestCase(tests.unittest.TestCase): state_dict_ids, { (e1.type, e1.state_key): e1.event_id, - (e3.type, e3.state_key): e3.event_id, - (e5.type, e5.state_key): e5.event_id, }, ) @@ -304,14 +336,25 @@ class StateStoreTestCase(tests.unittest.TestCase): # test _get_some_state_from_cache correctly filters out members with types=[] room_id = self.room.to_string() (state_dict, is_all) = yield self.store._get_some_state_from_cache( + self.store._state_group_cache, group, [], filtered_types=[EventTypes.Member] ) self.assertEqual(is_all, False) self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) + room_id = self.room.to_string() + (state_dict, is_all) = yield self.store._get_some_state_from_cache( + self.store._state_group_members_cache, + group, [], filtered_types=[EventTypes.Member] + ) + + self.assertEqual(is_all, True) + self.assertDictEqual({}, state_dict) + # test _get_some_state_from_cache correctly filters in members wildcard types (state_dict, is_all) = yield self.store._get_some_state_from_cache( + self.store._state_group_cache, group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member] ) @@ -319,8 +362,19 @@ class StateStoreTestCase(tests.unittest.TestCase): self.assertDictEqual( { (e1.type, e1.state_key): e1.event_id, + }, + state_dict, + ) + + (state_dict, is_all) = yield self.store._get_some_state_from_cache( + self.store._state_group_members_cache, + group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member] + ) + + self.assertEqual(is_all, True) + self.assertDictEqual( + { (e3.type, e3.state_key): e3.event_id, - # e4 is overwritten by e5 (e5.type, e5.state_key): e5.event_id, }, state_dict, @@ -328,6 +382,7 @@ class StateStoreTestCase(tests.unittest.TestCase): # test _get_some_state_from_cache correctly filters in members with specific types (state_dict, is_all) = yield self.store._get_some_state_from_cache( + self.store._state_group_cache, 
group, [(EventTypes.Member, e5.state_key)], filtered_types=[EventTypes.Member], @@ -337,6 +392,20 @@ class StateStoreTestCase(tests.unittest.TestCase): self.assertDictEqual( { (e1.type, e1.state_key): e1.event_id, + }, + state_dict, + ) + + (state_dict, is_all) = yield self.store._get_some_state_from_cache( + self.store._state_group_members_cache, + group, + [(EventTypes.Member, e5.state_key)], + filtered_types=[EventTypes.Member], + ) + + self.assertEqual(is_all, True) + self.assertDictEqual( + { (e5.type, e5.state_key): e5.event_id, }, state_dict, @@ -345,8 +414,22 @@ class StateStoreTestCase(tests.unittest.TestCase): # test _get_some_state_from_cache correctly filters in members with specific types # and no filtered_types (state_dict, is_all) = yield self.store._get_some_state_from_cache( + self.store._state_group_cache, + group, [(EventTypes.Member, e5.state_key)], filtered_types=None + ) + + self.assertEqual(is_all, False) + self.assertDictEqual({}, state_dict) + + (state_dict, is_all) = yield self.store._get_some_state_from_cache( + self.store._state_group_members_cache, group, [(EventTypes.Member, e5.state_key)], filtered_types=None ) self.assertEqual(is_all, True) - self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict) + self.assertDictEqual( + { + (e5.type, e5.state_key): e5.event_id, + }, + state_dict, + ) -- cgit 1.5.1 From 47b25ba5f3040d07f8be80584b292ca0386fdb30 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 22 Aug 2018 10:09:05 +0100 Subject: Remove redundant vars --- synapse/handlers/profile.py | 3 --- 1 file changed, 3 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 3e1d95d5e5..ec9cb1f50c 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -33,9 +33,6 @@ logger = logging.getLogger(__name__) class WorkerProfileHandler(BaseHandler): - PROFILE_UPDATE_MS = 60 * 1000 - PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000 - def __init__(self, hs): super(WorkerProfileHandler, self).__init__(hs) -- cgit 1.5.1 From a81f1408807683d212b7bf3c4f5f36df4b0e0d33 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 22 Aug 2018 10:11:21 +0100 Subject: Add assert to ensure handler is only run on master --- synapse/handlers/profile.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'synapse') diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index ec9cb1f50c..278f131d5e 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -282,6 +282,8 @@ class MasterProfileHandler(WorkerProfileHandler): def __init__(self, hs): super(MasterProfileHandler, self).__init__(hs) + assert hs.config.worker_app is None + self.clock.looping_call( self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS, ) -- cgit 1.5.1 From 8432e2ebd76d0462d39a329967671e845da7e404 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 22 Aug 2018 10:13:40 +0100 Subject: Rename WorkerProfileHandler to BaseProfileHandler --- synapse/handlers/profile.py | 13 ++++++++++--- synapse/server.py | 4 ++-- 2 files changed, 12 insertions(+), 5 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 278f131d5e..75b8b7ce6a 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -32,9 +32,16 @@ from ._base import BaseHandler logger = logging.getLogger(__name__) -class WorkerProfileHandler(BaseHandler): +class BaseProfileHandler(BaseHandler): + """Handles fetching and updating user profile information. 
+ + BaseProfileHandler can be instantiated directly on workers and will + delegate to master when necessary. The master process should use the + subclass MasterProfileHandler + """ + def __init__(self, hs): - super(WorkerProfileHandler, self).__init__(hs) + super(BaseProfileHandler, self).__init__(hs) self.federation = hs.get_federation_client() hs.get_federation_registry().register_query_handler( @@ -275,7 +282,7 @@ class WorkerProfileHandler(BaseHandler): ) -class MasterProfileHandler(WorkerProfileHandler): +class MasterProfileHandler(BaseProfileHandler): PROFILE_UPDATE_MS = 60 * 1000 PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000 diff --git a/synapse/server.py b/synapse/server.py index fcfa7e6557..a6fbc6ec0c 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -56,7 +56,7 @@ from synapse.handlers.initial_sync import InitialSyncHandler from synapse.handlers.message import EventCreationHandler, MessageHandler from synapse.handlers.pagination import PaginationHandler from synapse.handlers.presence import PresenceHandler -from synapse.handlers.profile import MasterProfileHandler, WorkerProfileHandler +from synapse.handlers.profile import BaseProfileHandler, MasterProfileHandler from synapse.handlers.read_marker import ReadMarkerHandler from synapse.handlers.receipts import ReceiptsHandler from synapse.handlers.room import RoomContextHandler, RoomCreationHandler @@ -309,7 +309,7 @@ class HomeServer(object): def build_profile_handler(self): if self.config.worker_app: - return WorkerProfileHandler(self) + return BaseProfileHandler(self) else: return MasterProfileHandler(self) -- cgit 1.5.1 From 5c261107c9b5bf82a51c27fd6cb85e2199e539e3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 9 Aug 2018 15:34:56 +0100 Subject: Remove unnecessary resolve_events_with_state_map We only ever used the synchronous resolve_events_with_state_map in one place, which is trivial to replace with the async version. 
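The caller-side consequence is the usual inlineCallbacks contract: once resolve_events is decorated, it returns a Deferred and must be yielded rather than called synchronously, which is why the federation.py call site in the diff below gains a yield. A standalone sketch of the pattern (illustrative only, not Synapse code; the resolver body is a stand-in):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def resolve_events(state_sets):
        merged = {}
        for state_set in state_sets:
            merged.update(state_set)
        # Stand-in for the database round-trip that motivates the Deferred.
        yield defer.succeed(None)
        defer.returnValue(merged)

    @defer.inlineCallbacks
    def caller(state_sets):
        # Before: new_state = resolve_events(state_sets)        # now wrong
        # After:  new_state = yield resolve_events(state_sets)
        new_state = yield resolve_events(state_sets)
        defer.returnValue(new_state)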
--- synapse/handlers/federation.py | 2 +- synapse/state/__init__.py | 34 ++++++---------------------------- synapse/state/v1.py | 28 ---------------------------- 3 files changed, 7 insertions(+), 57 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 0ebf0fd188..3fa7a98445 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1831,7 +1831,7 @@ class FederationHandler(BaseHandler): room_version = yield self.store.get_room_version(event.room_id) - new_state = self.state_handler.resolve_events( + new_state = yield self.state_handler.resolve_events( room_version, [list(local_view.values()), list(remote_view.values())], event diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index b34970e4d1..d7ae22a661 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -385,6 +385,7 @@ class StateHandler(object): ev_ids, get_prev_content=False, check_redacted=False, ) + @defer.inlineCallbacks def resolve_events(self, room_version, state_sets, event): logger.info( "Resolving state for %s with %d groups", event.room_id, len(state_sets) @@ -401,15 +402,17 @@ class StateHandler(object): } with Measure(self.clock, "state._resolve_events"): - new_state = resolve_events_with_state_map( - room_version, state_set_ids, state_map, + new_state = yield resolve_events_with_factory( + room_version, state_set_ids, + event_map=state_map, + state_map_factory=self._state_map_factory ) new_state = { key: state_map[ev_id] for key, ev_id in iteritems(new_state) } - return new_state + defer.returnValue(new_state) class StateResolutionHandler(object): @@ -589,31 +592,6 @@ def _make_state_cache_entry( ) -def resolve_events_with_state_map(room_version, state_sets, state_map): - """ - Args: - room_version(str): Version of the room - state_sets(list): List of dicts of (type, state_key) -> event_id, - which are the different state groups to resolve. - state_map(dict): a dict from event_id to event, for all events in - state_sets. - - Returns - dict[(str, str), str]: - a map from (type, state_key) to event_id. - """ - if room_version in (RoomVersions.V1, RoomVersions.VDH_TEST,): - return v1.resolve_events_with_state_map( - state_sets, state_map, - ) - else: - # This should only happen if we added a version but forgot to add it to - # the list above. - raise Exception( - "No state resolution algorithm defined for version %r" % (room_version,) - ) - - def resolve_events_with_factory(room_version, state_sets, event_map, state_map_factory): """ Args: diff --git a/synapse/state/v1.py b/synapse/state/v1.py index 3a1f7054a1..c95477d318 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -30,34 +30,6 @@ logger = logging.getLogger(__name__) POWER_KEY = (EventTypes.PowerLevels, "") -def resolve_events_with_state_map(state_sets, state_map): - """ - Args: - state_sets(list): List of dicts of (type, state_key) -> event_id, - which are the different state groups to resolve. - state_map(dict): a dict from event_id to event, for all events in - state_sets. - - Returns - dict[(str, str), str]: - a map from (type, state_key) to event_id. 
- """ - if len(state_sets) == 1: - return state_sets[0] - - unconflicted_state, conflicted_state = _seperate( - state_sets, - ) - - auth_events = _create_auth_events_from_maps( - unconflicted_state, conflicted_state, state_map - ) - - return _resolve_with_state( - unconflicted_state, conflicted_state, auth_events, state_map - ) - - @defer.inlineCallbacks def resolve_events_with_factory(state_sets, event_map, state_map_factory): """ -- cgit 1.5.1 From 9643a6f7f208f52febc4be7edb1e38f4ff077b4d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 22 Aug 2018 17:00:29 +0100 Subject: Update notice format --- synapse/api/constants.py | 6 ++++-- synapse/server_notices/resource_limits_server_notices.py | 14 +++++++++++--- .../server_notices/test_resource_limits_server_notices.py | 12 +++++++++--- 3 files changed, 24 insertions(+), 8 deletions(-) (limited to 'synapse') diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 1fb24578e2..a67862f4ed 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -80,8 +80,6 @@ class EventTypes(object): ServerACL = "m.room.server_acl" Pinned = "m.room.pinned_events" - ServerNoticeLimitReached = "m.server_notice.usage_limit_reached" - class RejectedReason(object): AUTH_ERROR = "auth_error" @@ -106,3 +104,7 @@ DEFAULT_ROOM_VERSION = "1" # vdh-test-version is a placeholder to get room versioning support working and tested # until we have a working v2. KNOWN_ROOM_VERSIONS = {"1", "vdh-test-version"} + + +ServerNoticeMsgType = "m.server_notice" +ServerNoticeLimitReached = "m.server_notice.usage_limit_reached" diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 84b91aeb5d..575697e54b 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -18,7 +18,11 @@ from six import iteritems from twisted.internet import defer -from synapse.api.constants import EventTypes +from synapse.api.constants import ( + EventTypes, + ServerNoticeLimitReached, + ServerNoticeMsgType, +) from synapse.api.errors import AuthError, ResourceLimitError, SynapseError from synapse.server_notices.server_notices_manager import SERVER_NOTICE_ROOM_TAG @@ -102,11 +106,13 @@ class ResourceLimitsServerNotices(object): # Add block notification content = { 'body': event_content, + 'msgtype': ServerNoticeMsgType, + 'server_notice_type': ServerNoticeLimitReached, 'admin_uri': self._config.admin_uri, 'limit_type': event_limit_type } event = yield self._server_notices_manager.send_notice( - user_id, content, EventTypes.ServerNoticeLimitReached + user_id, content, EventTypes.Message, ) content = { @@ -174,7 +180,9 @@ class ResourceLimitsServerNotices(object): events = yield self._store.get_events(referenced_events) for event_id, event in iteritems(events): - if event.type == EventTypes.ServerNoticeLimitReached: + if event.type != EventTypes.Message: + continue + if event.content.get("msgtype") == ServerNoticeMsgType: currently_blocked = True # remove event in case we need to disable blocking later on. 
if event_id in referenced_events: diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index cc8d2f539c..ca9b31128a 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -2,7 +2,7 @@ from mock import Mock from twisted.internet import defer -from synapse.api.constants import EventTypes +from synapse.api.constants import EventTypes, ServerNoticeMsgType from synapse.api.errors import ResourceLimitError from synapse.handlers.auth import AuthHandler from synapse.server_notices.resource_limits_server_notices import ( @@ -78,7 +78,10 @@ class TestResourceLimitsServerNotices(unittest.TestCase): """Test when user has blocked notice, but should have it removed""" self._rlsn._auth.check_auth_blocking = Mock() - mock_event = Mock(type=EventTypes.ServerNoticeLimitReached) + mock_event = Mock( + type=EventTypes.Message, + content={"msgtype": ServerNoticeMsgType}, + ) self._rlsn._store.get_events = Mock(return_value=defer.succeed( {"123": mock_event} )) @@ -94,7 +97,10 @@ class TestResourceLimitsServerNotices(unittest.TestCase): side_effect=ResourceLimitError(403, 'foo') ) - mock_event = Mock(type=EventTypes.ServerNoticeLimitReached) + mock_event = Mock( + type=EventTypes.Message, + content={"msgtype": ServerNoticeMsgType}, + ) self._rlsn._store.get_events = Mock(return_value=defer.succeed( {"123": mock_event} )) -- cgit 1.5.1 From 7a0da69eee49ce84ddd8a433288d7ce9cb76e14e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 23 Aug 2018 10:28:10 +0100 Subject: Add missing yield --- synapse/storage/monthly_active_users.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index 06f9a75a97..fd3b630bd2 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -147,6 +147,7 @@ class MonthlyActiveUsersStore(SQLBaseStore): return count return self.runInteraction("count_users", _count_users) + @defer.inlineCallbacks def upsert_monthly_active_user(self, user_id): """ Updates or inserts monthly active user member @@ -155,7 +156,7 @@ class MonthlyActiveUsersStore(SQLBaseStore): Deferred[bool]: True if a new entry was created, False if an existing one was updated. 
""" - is_insert = self._simple_upsert( + is_insert = yield self._simple_upsert( desc="upsert_monthly_active_user", table="monthly_active_users", keyvalues={ -- cgit 1.5.1 From 7e6e588e60708be3e58f026bd65bd4680f9cd969 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 23 Aug 2018 16:20:51 +0100 Subject: Fix bug where we resent "limit exceeded" server notices This was due to a bug where we mutated a cached event's contents --- .../resource_limits_server_notices.py | 6 +- .../test_resource_limits_server_notices.py | 66 ++++++++++++++++++++++ 2 files changed, 71 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 575697e54b..96eb97771f 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -76,6 +76,10 @@ class ResourceLimitsServerNotices(object): room_id = yield self._server_notices_manager.get_notice_room_for_user(user_id) + if not room_id: + logger.warn("Failed to get server notices room") + return + yield self._check_and_set_tags(user_id, room_id) currently_blocked, ref_events = yield self._is_room_currently_blocked(room_id) @@ -176,7 +180,7 @@ class ResourceLimitsServerNotices(object): referenced_events = [] if pinned_state_event is not None: - referenced_events = pinned_state_event.content.get('pinned') + referenced_events = list(pinned_state_event.content.get('pinned', [])) events = yield self._store.get_events(referenced_events) for event_id, event in iteritems(events): diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index ca9b31128a..fede3f52a9 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -144,3 +144,69 @@ class TestResourceLimitsServerNotices(unittest.TestCase): yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) self._send_notice.assert_not_called() + + +class TestResourceLimitsServerNoticesWithRealRooms(unittest.TestCase): + @defer.inlineCallbacks + def setUp(self): + self.hs = yield setup_test_homeserver(self.addCleanup) + self.store = self.hs.get_datastore() + self.server_notices_sender = self.hs.get_server_notices_sender() + self.server_notices_manager = self.hs.get_server_notices_manager() + self.event_source = self.hs.get_event_sources() + + # relying on [1] is far from ideal, but the only case where + # ResourceLimitsServerNotices class needs to be isolated is this test, + # general code should never have a reason to do so ... 
+ self._rlsn = self.server_notices_sender._server_notices[1] + if not isinstance(self._rlsn, ResourceLimitsServerNotices): + raise Exception("Failed to find reference to ResourceLimitsServerNotices") + + self.hs.config.limit_usage_by_mau = True + self.hs.config.hs_disabled = False + self.hs.config.max_mau_value = 5 + self.hs.config.server_notices_mxid = "@server:test" + self.hs.config.server_notices_mxid_display_name = None + self.hs.config.server_notices_mxid_avatar_url = None + self.hs.config.server_notices_room_name = "Test Server Notice Room" + + self.user_id = "@user_id:test" + + self.hs.config.admin_uri = "mailto:user@test.com" + + @defer.inlineCallbacks + def test_server_notice_only_sent_once(self): + self.store.get_monthly_active_count = Mock( + return_value=1000, + ) + + self.store.user_last_seen_monthly_active = Mock( + return_value=1000, + ) + + # Call the function multiple times to ensure we only send the notice once + yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) + yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) + yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) + + # Now let's get the last load of messages in the server notices room and + # check that there is only one server notice + room_id = yield self.server_notices_manager.get_notice_room_for_user( + self.user_id, + ) + + token = yield self.event_source.get_current_token() + events, _ = yield self.store.get_recent_events_for_room( + room_id, limit=100, end_token=token.room_key, + ) + + count = 0 + for event in events: + if event.type != EventTypes.Message: + continue + if event.content.get("msgtype") != ServerNoticeMsgType: + continue + + count += 1 + + self.assertEqual(count, 1) -- cgit 1.5.1 From cd77270a669fa827912efd86a0723d85e9e388ed Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 23 Aug 2018 19:17:08 +0100 Subject: Implement trial users --- synapse/api/auth.py | 6 +++++- synapse/config/server.py | 6 ++++++ synapse/storage/monthly_active_users.py | 5 +++++ synapse/storage/registration.py | 28 +++++++++++++++++++++++++++- tests/storage/test_registration.py | 1 + 5 files changed, 44 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 4ca40a0f71..8d2aa5870a 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -797,11 +797,15 @@ class Auth(object): limit_type=self.hs.config.hs_disabled_limit_type ) if self.hs.config.limit_usage_by_mau is True: - # If the user is already part of the MAU cohort + # If the user is already part of the MAU cohort or a trial user if user_id: timestamp = yield self.store.user_last_seen_monthly_active(user_id) if timestamp: return + + is_trial = yield self.store.is_trial_user(user_id) + if is_trial: + return # Else if there is no room in the MAU bucket, bail current_mau = yield self.store.get_monthly_active_count() if current_mau >= self.hs.config.max_mau_value: diff --git a/synapse/config/server.py b/synapse/config/server.py index 68a612e594..8eecd28e7d 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -77,10 +77,15 @@ class ServerConfig(Config): self.max_mau_value = config.get( "max_mau_value", 0, ) + self.mau_limits_reserved_threepids = config.get( "mau_limit_reserved_threepids", [] ) + self.mau_trial_days = config.get( + "mau_trial_days", 0, + ) + # Options to disable HS self.hs_disabled = config.get("hs_disabled", False) self.hs_disabled_message = config.get("hs_disabled_message", "") @@ -365,6 +370,7 @@ class ServerConfig(Config): # Enables
monthly active user checking # limit_usage_by_mau: False # max_mau_value: 50 + # mau_trial_days: 2 # # Sometimes the server admin will want to ensure certain accounts are # never blocked by mau checking. These accounts are specified here. diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index fd3b630bd2..d178f5c5ba 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -201,6 +201,11 @@ class MonthlyActiveUsersStore(SQLBaseStore): user_id(str): the user_id to query """ if self.hs.config.limit_usage_by_mau: + is_trial = yield self.is_trial_user(user_id) + if is_trial: + # we don't track trial users in the MAU table. + return + last_seen_timestamp = yield self.user_last_seen_monthly_active(user_id) now = self.hs.get_clock().time_msec() diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 07333f777d..26b429e307 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -26,6 +26,11 @@ from synapse.util.caches.descriptors import cached, cachedInlineCallbacks class RegistrationWorkerStore(SQLBaseStore): + def __init__(self, db_conn, hs): + super(RegistrationWorkerStore, self).__init__(db_conn, hs) + + self.config = hs.config + @cached() def get_user_by_id(self, user_id): return self._simple_select_one( @@ -36,12 +41,33 @@ class RegistrationWorkerStore(SQLBaseStore): retcols=[ "name", "password_hash", "is_guest", "consent_version", "consent_server_notice_sent", - "appservice_id", + "appservice_id", "creation_ts", ], allow_none=True, desc="get_user_by_id", ) + @defer.inlineCallbacks + def is_trial_user(self, user_id): + """Checks if user is in the "trial" period, i.e. within the first + N days of registration defined by `mau_trial_days` config + + Args: + user_id (str) + + Returns: + Deferred[bool] + """ + + info = yield self.get_user_by_id(user_id) + if not info: + defer.returnValue(False) + + now = self.clock.time_msec() + trial_duration_ms = self.config.mau_trial_days * 24 * 60 * 60 * 1000 + is_trial = (now - info["creation_ts"] * 1000) < trial_duration_ms + defer.returnValue(is_trial) + @cached() def get_user_by_access_token(self, token): """Get a user from the given access token. diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py index 4eda122edc..3dfb7b903a 100644 --- a/tests/storage/test_registration.py +++ b/tests/storage/test_registration.py @@ -46,6 +46,7 @@ class RegistrationStoreTestCase(unittest.TestCase): "consent_version": None, "consent_server_notice_sent": None, "appservice_id": None, + "creation_ts": 1000, }, (yield self.store.get_user_by_id(self.user_id)), ) -- cgit 1.5.1 From f584d6108fb9f9e2650404ce6b42fad805ddb9ca Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 24 Aug 2018 10:02:31 +0100 Subject: Pin to twisted 17.1 or later Fixes https://github.com/matrix-org/synapse/issues/3741. 
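For operators who want to confirm a deployment already satisfies the new floor, a check along these lines works (illustrative only and not part of the patch; it assumes setuptools' pkg_resources is importable):

    import twisted
    from pkg_resources import parse_version

    # Synapse now requires Twisted 17.1.0 or later; fail fast on older installs.
    if parse_version(twisted.__version__) < parse_version("17.1.0"):
        raise RuntimeError(
            "Twisted >= 17.1.0 is required, found %s" % twisted.__version__
        )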
From f584d6108fb9f9e2650404ce6b42fad805ddb9ca Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 24 Aug 2018 10:02:31 +0100 Subject: Pin to twisted 17.1 or later Fixes https://github.com/matrix-org/synapse/issues/3741. --- synapse/python_dependencies.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 987eec3ef2..9c55e79ef5 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -39,7 +39,7 @@ REQUIREMENTS = { "signedjson>=1.0.0": ["signedjson>=1.0.0"], "pynacl>=1.2.1": ["nacl>=1.2.1", "nacl.bindings"], "service_identity>=1.0.0": ["service_identity>=1.0.0"], - "Twisted>=16.0.0": ["twisted>=16.0.0"], + "Twisted>=17.1.0": ["twisted>=17.1.0"], # We use crypto.get_elliptic_curve which is only supported in >=0.15 "pyopenssl>=0.15": ["OpenSSL>=0.15"], -- cgit 1.5.1 From 7f147d623b981644d22cd4db7db0e7baf33b0e2e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 24 Aug 2018 11:31:31 +0100 Subject: Fix bug where we broke sync when using limit_usage_by_mau We assumed that we always had server notices configured, but that is not always true --- synapse/server_notices/resource_limits_server_notices.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'synapse') diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 96eb97771f..98495ddbf9 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -66,6 +66,10 @@ class ResourceLimitsServerNotices(object): if self._config.limit_usage_by_mau is False: return + if not self._server_notices_manager.is_enabled(): + # Don't try to send server notices unless they've been enabled + return + timestamp = yield self._store.user_last_seen_monthly_active(user_id) if timestamp is None: # This user will be blocked from receiving the notice anyway. -- cgit 1.5.1 From 1d67b13674c681b0e228049e34fa2fbd43703bb9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 24 Aug 2018 12:10:01 +0100 Subject: Fix bug when federation_domain_whitelist is an empty list Outbound federation was incorrectly allowed when the config option was set to an empty list --- synapse/http/matrixfederationclient.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 44b61e70a4..b34bb8e31a 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -133,7 +133,7 @@ class MatrixFederationHttpClient(object): failures, connection failures, SSL failures.)
""" if ( - self.hs.config.federation_domain_whitelist and + self.hs.config.federation_domain_whitelist is not None and destination not in self.hs.config.federation_domain_whitelist ): raise FederationDeniedError(destination) -- cgit 1.5.1 From 14d49c51dbbf1222477e076eb4d8d79c0181f901 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 24 Aug 2018 14:44:16 +0100 Subject: Make content of tag an empty object rather than null --- synapse/server_notices/resource_limits_server_notices.py | 2 +- synapse/server_notices/server_notices_manager.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 98495ddbf9..7144dfcbc2 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -153,7 +153,7 @@ class ResourceLimitsServerNotices(object): need_to_set_tag = False if need_to_set_tag: yield self._store.add_tag_to_room( - user_id, room_id, SERVER_NOTICE_ROOM_TAG, None + user_id, room_id, SERVER_NOTICE_ROOM_TAG, {} ) @defer.inlineCallbacks diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index 5968104a99..a5e4728859 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -154,7 +154,7 @@ class ServerNoticesManager(object): ) room_id = info['room_id'] yield self._store.add_tag_to_room( - user_id, room_id, SERVER_NOTICE_ROOM_TAG, None + user_id, room_id, SERVER_NOTICE_ROOM_TAG, {}, ) logger.info("Created server notices room %s for %s", room_id, user_id) -- cgit 1.5.1 From cdd24449eeec12f869ceca233085f957d8dc7d19 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 24 Aug 2018 14:50:03 +0100 Subject: Ensure we wake up /sync when we add tag to notice room --- synapse/server_notices/resource_limits_server_notices.py | 7 ++++++- synapse/server_notices/server_notices_manager.py | 8 +++++++- 2 files changed, 13 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 7144dfcbc2..893b9001cd 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -46,6 +46,8 @@ class ResourceLimitsServerNotices(object): self._message_handler = hs.get_message_handler() self._state = hs.get_state_handler() + self._notifier = hs.get_notifier() + @defer.inlineCallbacks def maybe_send_server_notice_to_user(self, user_id): """Check if we need to send a notice to this user, this will be true in @@ -152,9 +154,12 @@ class ResourceLimitsServerNotices(object): # tag already present, nothing to do here need_to_set_tag = False if need_to_set_tag: - yield self._store.add_tag_to_room( + max_id = yield self._store.add_tag_to_room( user_id, room_id, SERVER_NOTICE_ROOM_TAG, {} ) + self._notifier.on_new_event( + "account_data_key", max_id, users=[user_id] + ) @defer.inlineCallbacks def _is_room_currently_blocked(self, room_id): diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index a5e4728859..c5cc6d728e 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -39,6 +39,8 @@ class ServerNoticesManager(object): self._event_creation_handler = 
From 14d49c51dbbf1222477e076eb4d8d79c0181f901 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 24 Aug 2018 14:44:16 +0100 Subject: Make content of tag an empty object rather than null --- synapse/server_notices/resource_limits_server_notices.py | 2 +- synapse/server_notices/server_notices_manager.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 98495ddbf9..7144dfcbc2 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -153,7 +153,7 @@ class ResourceLimitsServerNotices(object): need_to_set_tag = False if need_to_set_tag: yield self._store.add_tag_to_room( - user_id, room_id, SERVER_NOTICE_ROOM_TAG, None + user_id, room_id, SERVER_NOTICE_ROOM_TAG, {} ) @defer.inlineCallbacks diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index 5968104a99..a5e4728859 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -154,7 +154,7 @@ class ServerNoticesManager(object): ) room_id = info['room_id'] yield self._store.add_tag_to_room( - user_id, room_id, SERVER_NOTICE_ROOM_TAG, None + user_id, room_id, SERVER_NOTICE_ROOM_TAG, {}, ) logger.info("Created server notices room %s for %s", room_id, user_id) -- cgit 1.5.1 From cdd24449eeec12f869ceca233085f957d8dc7d19 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 24 Aug 2018 14:50:03 +0100 Subject: Ensure we wake up /sync when we add a tag to the notice room --- synapse/server_notices/resource_limits_server_notices.py | 7 ++++++- synapse/server_notices/server_notices_manager.py | 8 +++++++- 2 files changed, 13 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 7144dfcbc2..893b9001cd 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -46,6 +46,8 @@ class ResourceLimitsServerNotices(object): self._message_handler = hs.get_message_handler() self._state = hs.get_state_handler() + self._notifier = hs.get_notifier() + @defer.inlineCallbacks def maybe_send_server_notice_to_user(self, user_id): """Check if we need to send a notice to this user, this will be true in @@ -152,9 +154,12 @@ class ResourceLimitsServerNotices(object): # tag already present, nothing to do here need_to_set_tag = False if need_to_set_tag: - yield self._store.add_tag_to_room( + max_id = yield self._store.add_tag_to_room( user_id, room_id, SERVER_NOTICE_ROOM_TAG, {} ) + self._notifier.on_new_event( + "account_data_key", max_id, users=[user_id] + ) @defer.inlineCallbacks def _is_room_currently_blocked(self, room_id): diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index a5e4728859..c5cc6d728e 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -39,6 +39,8 @@ class ServerNoticesManager(object): self._event_creation_handler = hs.get_event_creation_handler() self._is_mine_id = hs.is_mine_id + self._notifier = hs.get_notifier() + def is_enabled(self): """Checks if server notices are enabled on this server. @@ -153,9 +155,13 @@ class ServerNoticesManager(object): creator_join_profile=join_profile, ) room_id = info['room_id'] - yield self._store.add_tag_to_room( + + max_id = yield self._store.add_tag_to_room( user_id, room_id, SERVER_NOTICE_ROOM_TAG, {}, ) + self._notifier.on_new_event( + "account_data_key", max_id, users=[user_id] + ) logger.info("Created server notices room %s for %s", room_id, user_id) defer.returnValue(room_id) -- cgit 1.5.1 From 01a5a8b9e3f4ce0b4e2419df5dc3645de0428629 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 24 Aug 2018 16:22:37 +0100 Subject: Fix checking if server notices room is already tagged This manifested in Synapse repeatedly setting the tag for the room --- synapse/server_notices/resource_limits_server_notices.py | 7 +++---- tests/server_notices/test_resource_limits_server_notices.py | 1 + 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'synapse') diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 893b9001cd..5d86114e09 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -146,11 +146,10 @@ class ResourceLimitsServerNotices(object): user_id(str): the user in question room_id(str): the server notices room for that user """ - tags = yield self._store.get_tags_for_user(user_id) - server_notices_tags = tags.get(room_id) + tags = yield self._store.get_tags_for_room(user_id, room_id) need_to_set_tag = True - if server_notices_tags: - if server_notices_tags.get(SERVER_NOTICE_ROOM_TAG): + if tags: + if SERVER_NOTICE_ROOM_TAG in tags: # tag already present, nothing to do here need_to_set_tag = False if need_to_set_tag: diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index fede3f52a9..0ddf4fd50d 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -55,6 +55,7 @@ class TestResourceLimitsServerNotices(unittest.TestCase): returnValue="" ) self._rlsn._store.add_tag_to_room = Mock() + self._rlsn._store.get_tags_for_room = Mock(return_value={}) self.hs.config.admin_uri = "mailto:user@test.com" @defer.inlineCallbacks -- cgit 1.5.1 From 05077e06fa527a9b504e34bbd14af53ba66d3f62 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 24 Aug 2018 16:51:27 +0100 Subject: Change admin_uri to admin_contact in config and errors --- synapse/api/auth.py | 4 ++-- synapse/api/errors.py | 6 +++--- synapse/config/server.py | 4 ++-- synapse/server_notices/resource_limits_server_notices.py | 2 +- tests/api/test_auth.py | 4 ++-- tests/server_notices/test_resource_limits_server_notices.py | 4 ++-- tests/utils.py | 2 +- 7 files changed, 13 insertions(+), 13 deletions(-) (limited to 'synapse') diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 8d2aa5870a..a7e3f7a7ac 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -793,7 +793,7 @@ class Auth(object): raise ResourceLimitError( 403, self.hs.config.hs_disabled_message, errcode=Codes.RESOURCE_LIMIT_EXCEEDED, - admin_uri=self.hs.config.admin_uri, + admin_contact=self.hs.config.admin_contact, limit_type=self.hs.config.hs_disabled_limit_type ) if self.hs.config.limit_usage_by_mau is True:
@@ -812,7 +812,7 @@ class Auth(object): raise ResourceLimitError( 403, "Monthly Active User Limit Exceeded", - admin_uri=self.hs.config.admin_uri, + admin_contact=self.hs.config.admin_contact, errcode=Codes.RESOURCE_LIMIT_EXCEEDED, limit_type="monthly_active_user" ) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index c4ddba9889..2e7f98404d 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -239,10 +239,10 @@ class ResourceLimitError(SynapseError): def __init__( self, code, msg, errcode=Codes.RESOURCE_LIMIT_EXCEEDED, - admin_uri=None, + admin_contact=None, limit_type=None, ): - self.admin_uri = admin_uri + self.admin_contact = admin_contact self.limit_type = limit_type super(ResourceLimitError, self).__init__(code, msg, errcode=errcode) @@ -250,7 +250,7 @@ class ResourceLimitError(SynapseError): return cs_error( self.msg, self.errcode, - admin_uri=self.admin_uri, + admin_contact=self.admin_contact, limit_type=self.limit_type ) diff --git a/synapse/config/server.py b/synapse/config/server.py index 8eecd28e7d..2faf472189 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -93,7 +93,7 @@ class ServerConfig(Config): # Admin uri to direct users at should their instance become blocked # due to resource constraints - self.admin_uri = config.get("admin_uri", None) + self.admin_contact = config.get("admin_contact", None) # FIXME: federation_domain_whitelist needs sytests self.federation_domain_whitelist = None @@ -357,7 +357,7 @@ class ServerConfig(Config): # Homeserver blocking # # How to reach the server admin, used in ResourceLimitError - # admin_uri: 'mailto:admin@server.com' + # admin_contact: 'mailto:admin@server.com' # # Global block config # diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 893b9001cd..c584c8f907 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -118,7 +118,7 @@ class ResourceLimitsServerNotices(object): 'body': event_content, 'msgtype': ServerNoticeMsgType, 'server_notice_type': ServerNoticeLimitReached, - 'admin_uri': self._config.admin_uri, + 'admin_contact': self._config.admin_contact, 'limit_type': event_limit_type } event = yield self._server_notices_manager.send_notice( diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index ed960090c4..54e396d19d 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -457,7 +457,7 @@ class AuthTestCase(unittest.TestCase): with self.assertRaises(ResourceLimitError) as e: yield self.auth.check_auth_blocking() - self.assertEquals(e.exception.admin_uri, self.hs.config.admin_uri) + self.assertEquals(e.exception.admin_contact, self.hs.config.admin_contact) self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.assertEquals(e.exception.code, 403) @@ -473,7 +473,7 @@ class AuthTestCase(unittest.TestCase): self.hs.config.hs_disabled_message = "Reason for being disabled" with self.assertRaises(ResourceLimitError) as e: yield self.auth.check_auth_blocking() - self.assertEquals(e.exception.admin_uri, self.hs.config.admin_uri) + self.assertEquals(e.exception.admin_contact, self.hs.config.admin_contact) self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.assertEquals(e.exception.code, 403) diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index fede3f52a9..494a7bc91e 100644 --- 
a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -55,7 +55,7 @@ class TestResourceLimitsServerNotices(unittest.TestCase): returnValue="" ) self._rlsn._store.add_tag_to_room = Mock() - self.hs.config.admin_uri = "mailto:user@test.com" + self.hs.config.admin_contact = "mailto:user@test.com" @defer.inlineCallbacks def test_maybe_send_server_notice_to_user_flag_off(self): @@ -172,7 +172,7 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.TestCase): self.user_id = "@user_id:test" - self.hs.config.admin_uri = "mailto:user@test.com" + self.hs.config.admin_contact = "mailto:user@test.com" @defer.inlineCallbacks def test_server_notice_only_sent_once(self): diff --git a/tests/utils.py b/tests/utils.py index 9f7ff94575..e8ef10445c 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -142,7 +142,7 @@ def setup_test_homeserver( config.hs_disabled_limit_type = "" config.max_mau_value = 50 config.mau_limits_reserved_threepids = [] - config.admin_uri = None + config.admin_contact = None # we need a sane default_room_version, otherwise attempts to create rooms will # fail. -- cgit 1.5.1 From 16a31c6fcebe31bbdb655ebcb24ccce426b3e994 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Fri, 24 Aug 2018 22:51:25 -0400 Subject: update to newer Synapse APIs --- synapse/handlers/e2e_room_keys.py | 2 +- tests/handlers/test_e2e_room_keys.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index a43fc7fc7e..c09816b372 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -18,7 +18,7 @@ import logging from twisted.internet import defer from synapse.api.errors import StoreError, SynapseError, RoomKeysVersionError -from synapse.util.async import Linearizer +from synapse.util.async_helpers import Linearizer logger = logging.getLogger(__name__) diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index 8bfffb5c0e..7fa4264441 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -52,6 +52,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): @defer.inlineCallbacks def setUp(self): self.hs = yield utils.setup_test_homeserver( + self.addCleanup, handlers=None, replication_layer=mock.Mock(), ) -- cgit 1.5.1 From 0b07f02e19e5ed1eba0e74c3863def9f849b5f9a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 28 Aug 2018 13:39:49 +0100 Subject: Make sure that we close db connections opened during init We should explicitly close any db connections we open, because failing to do so can block other transactions as per https://github.com/matrix-org/synapse/issues/3682. Let's also try to factor out some of the boilerplate by having server classes define their datastore class rather than duplicating the whole of `setup`. 
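The pattern this refactor lands on is compact enough to show in miniature before the repetitive per-worker diff below. A self-contained sketch under invented names (FakeStore, HomeServerBase and WorkerServer are illustrative stubs, not Synapse classes):

import abc
import sqlite3


class FakeStore(object):
    # stands in for a DataStore / slaved-store subclass
    def __init__(self, conn, hs):
        self.conn = conn


class HomeServerBase(object):
    __metaclass__ = abc.ABCMeta  # Python 2 metaclass syntax, matching this era of Synapse

    # Subclasses override this; a follow-up commit below uses
    # abc.abstractproperty() so forgetting to do so fails loudly.
    DATASTORE_CLASS = None

    def get_db_conn(self):
        return sqlite3.connect(":memory:")

    def setup(self):
        # The connection is used as a context manager, mirroring the
        # change to get_db_conn() usage in this commit; sqlite3's context
        # manager commits the transaction on success.
        with self.get_db_conn() as conn:
            self.datastore = self.DATASTORE_CLASS(conn, self)


class WorkerServer(HomeServerBase):
    DATASTORE_CLASS = FakeStore  # one attribute instead of a copy-pasted setup()


WorkerServer().setup()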
--- synapse/app/appservice.py | 5 +---- synapse/app/client_reader.py | 5 +---- synapse/app/event_creator.py | 5 +---- synapse/app/federation_reader.py | 5 +---- synapse/app/federation_sender.py | 5 +---- synapse/app/frontend_proxy.py | 5 +---- synapse/app/homeserver.py | 14 ++++++++------ synapse/app/media_repository.py | 5 +---- synapse/app/pusher.py | 5 +---- synapse/app/synchrotron.py | 5 +---- synapse/app/user_dir.py | 5 +---- synapse/server.py | 15 +++++++++++++-- 12 files changed, 31 insertions(+), 48 deletions(-) (limited to 'synapse') diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index 3348a8ec6d..86b5067400 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -51,10 +51,7 @@ class AppserviceSlaveStore( class AppserviceServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = AppserviceSlaveStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = AppserviceSlaveStore def _listen_http(self, listener_config): port = listener_config["port"] diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index ab79a45646..ce2b113dbb 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -74,10 +74,7 @@ class ClientReaderSlavedStore( class ClientReaderServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = ClientReaderSlavedStore def _listen_http(self, listener_config): port = listener_config["port"] diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index a34c89fa99..f98e456ea0 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -90,10 +90,7 @@ class EventCreatorSlavedStore( class EventCreatorServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = EventCreatorSlavedStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = EventCreatorSlavedStore def _listen_http(self, listener_config): port = listener_config["port"] diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 7d8105778d..60f5973505 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -72,10 +72,7 @@ class FederationReaderSlavedStore( class FederationReaderServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = FederationReaderSlavedStore def _listen_http(self, listener_config): port = listener_config["port"] diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index d59007099b..60dd09aac3 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -78,10 +78,7 @@ class FederationSenderSlaveStore( class FederationSenderServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = FederationSenderSlaveStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = FederationSenderSlaveStore def _listen_http(self, listener_config): port = listener_config["port"] diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 8d484c1cd4..8c0b9c67b0 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -148,10 +148,7 @@ class FrontendProxySlavedStore( class FrontendProxyServer(HomeServer): - 
def setup(self): - logger.info("Setting up.") - self.datastore = FrontendProxySlavedStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = FrontendProxySlavedStore def _listen_http(self, listener_config): port = listener_config["port"] diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 005921dcf7..3eb5b663de 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -62,7 +62,7 @@ from synapse.rest.key.v1.server_key_resource import LocalKey from synapse.rest.key.v2 import KeyApiV2Resource from synapse.rest.media.v0.content_repository import ContentRepoResource from synapse.server import HomeServer -from synapse.storage import are_all_users_on_domain +from synapse.storage import DataStore, are_all_users_on_domain from synapse.storage.engines import IncorrectDatabaseSetup, create_engine from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database from synapse.util.caches import CACHE_SIZE_FACTOR @@ -111,6 +111,8 @@ def build_resource_for_web_client(hs): class SynapseHomeServer(HomeServer): + DATASTORE_CLASS = DataStore + def _listener_http(self, config, listener_config): port = listener_config["port"] bind_addresses = listener_config["bind_addresses"] @@ -356,13 +358,13 @@ def setup(config_options): logger.info("Preparing database: %s...", config.database_config['name']) try: - db_conn = hs.get_db_conn(run_new_connection=False) - prepare_database(db_conn, database_engine, config=config) - database_engine.on_new_connection(db_conn) + with hs.get_db_conn(run_new_connection=False) as db_conn: + prepare_database(db_conn, database_engine, config=config) + database_engine.on_new_connection(db_conn) - hs.run_startup_checks(db_conn, database_engine) + hs.run_startup_checks(db_conn, database_engine) - db_conn.commit() + db_conn.commit() except UpgradeDatabaseException: sys.stderr.write( "\nFailed to upgrade database.\n" diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index fd1f6cbf7e..e3dbb3b4e6 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -60,10 +60,7 @@ class MediaRepositorySlavedStore( class MediaRepositoryServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = MediaRepositorySlavedStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = MediaRepositorySlavedStore def _listen_http(self, listener_config): port = listener_config["port"] diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index a4fc7e91fa..244c604de9 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -78,10 +78,7 @@ class PusherSlaveStore( class PusherServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = PusherSlaveStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = PusherSlaveStore def remove_pusher(self, app_id, push_key, user_id): self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 27e1998660..6662340797 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -249,10 +249,7 @@ class SynchrotronApplicationService(object): class SynchrotronServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = SynchrotronSlavedStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = SynchrotronSlavedStore def _listen_http(self, listener_config): port 
= listener_config["port"] diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index 1388a42b59..96ffcaf073 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -94,10 +94,7 @@ class UserDirectorySlaveStore( class UserDirectoryServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = UserDirectorySlaveStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = UserDirectorySlaveStore def _listen_http(self, listener_config): port = listener_config["port"] diff --git a/synapse/server.py b/synapse/server.py index a6fbc6ec0c..802d679cde 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -81,7 +81,6 @@ from synapse.server_notices.server_notices_manager import ServerNoticesManager from synapse.server_notices.server_notices_sender import ServerNoticesSender from synapse.server_notices.worker_server_notices_sender import WorkerServerNoticesSender from synapse.state import StateHandler, StateResolutionHandler -from synapse.storage import DataStore from synapse.streams.events import EventSources from synapse.util import Clock from synapse.util.distributor import Distributor @@ -172,6 +171,11 @@ class HomeServer(object): 'room_context_handler', ] + # This is overridden in derived application classes + # (such as synapse.app.homeserver.SynapseHomeServer) and gives the class to be + # instantiated during setup() for future return by get_datastore() + DATASTORE_CLASS = None + def __init__(self, hostname, reactor=None, **kwargs): """ Args: @@ -188,13 +192,20 @@ class HomeServer(object): self.distributor = Distributor() self.ratelimiter = Ratelimiter() + self.datastore = None + # Other kwargs are explicit dependencies for depname in kwargs: setattr(self, depname, kwargs[depname]) def setup(self): logger.info("Setting up.") - self.datastore = DataStore(self.get_db_conn(), self) + if self.DATASTORE_CLASS is None: + raise RuntimeError("%s does not define a DATASTORE_CLASS" % ( + self.__class__.__name__, + )) + with self.get_db_conn() as conn: + self.datastore = self.DATASTORE_CLASS(conn, self) logger.info("Finished setting up.") def get_reactor(self): -- cgit 1.5.1 From 32eb1dedd294831f2bafc79a24dae91eee319563 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 28 Aug 2018 17:10:43 +0100 Subject: use abc.abstractproperty This gives clearer messages when someone gets it wrong --- synapse/server.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'synapse') diff --git a/synapse/server.py b/synapse/server.py index 802d679cde..938a05f9dc 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -19,6 +19,7 @@ # partial one for unit test mocking. 
# Imports required for the default HomeServer() implementation +import abc import logging from twisted.enterprise import adbapi @@ -110,6 +111,8 @@ class HomeServer(object): config (synapse.config.homeserver.HomeserverConfig): """ + __metaclass__ = abc.ABCMeta + DEPENDENCIES = [ 'http_client', 'db_pool', @@ -174,7 +177,7 @@ class HomeServer(object): # This is overridden in derived application classes # (such as synapse.app.homeserver.SynapseHomeServer) and gives the class to be # instantiated during setup() for future return by get_datastore() - DATASTORE_CLASS = None + DATASTORE_CLASS = abc.abstractproperty() def __init__(self, hostname, reactor=None, **kwargs): """ @@ -200,10 +203,6 @@ class HomeServer(object): def setup(self): logger.info("Setting up.") - if self.DATASTORE_CLASS is None: - raise RuntimeError("%s does not define a DATASTORE_CLASS" % ( - self.__class__.__name__, - )) with self.get_db_conn() as conn: self.datastore = self.DATASTORE_CLASS(conn, self) logger.info("Finished setting up.") -- cgit 1.5.1 From b1580f50fea05ce02dc4663adb23704b33e30bf3 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 28 Aug 2018 23:25:58 +0100 Subject: don't return non-LL-member state in incremental sync state blocks (#3760) don't return non-LL-member state in incremental sync state blocks --- changelog.d/3760.bugfix | 1 + synapse/handlers/sync.py | 9 ++++++++- 2 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 changelog.d/3760.bugfix (limited to 'synapse') diff --git a/changelog.d/3760.bugfix b/changelog.d/3760.bugfix new file mode 100644 index 0000000000..ce61fb8a2b --- /dev/null +++ b/changelog.d/3760.bugfix @@ -0,0 +1 @@ +Don't return non-LL-member state in incremental sync state blocks diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 648debc8aa..ef20c2296c 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -745,9 +745,16 @@ class SyncHandler(object): state_ids = {} if lazy_load_members: if types: + # We're returning an incremental sync, with no "gap" since + # the previous sync, so normally there would be no state to return + # But we're lazy-loading, so the client might need some more + # member events to understand the events in this timeline. + # So we fish out all the member events corresponding to the + # timeline here, and then dedupe any redundant ones below. + state_ids = yield self.store.get_state_ids_for_event( batch.events[0].event_id, types=types, - filtered_types=filtered_types, + filtered_types=None, # we only want members! ) if lazy_load_members and not include_redundant_members: -- cgit 1.5.1 From 79a8a347a6c55a3ec6255227930dd9aad4b0b78f Mon Sep 17 00:00:00 2001 From: Krombel Date: Wed, 29 Aug 2018 16:26:21 +0200 Subject: fix #3445 itervalues(d) calls d.itervalues() [PY2] and d.values() [PY3] but SortedDict only implements d.values() --- changelog.d/3445.bugfix | 1 + synapse/federation/send_queue.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/3445.bugfix (limited to 'synapse') diff --git a/changelog.d/3445.bugfix b/changelog.d/3445.bugfix new file mode 100644 index 0000000000..f7f1c4778d --- /dev/null +++ b/changelog.d/3445.bugfix @@ -0,0 +1 @@ +do not use six.itervalues() on SortedDict() diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 0bb468385d..6f5995735a 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -32,7 +32,7 @@ Events are replicated via a separate events stream. 
import logging from collections import namedtuple -from six import iteritems, itervalues +from six import iteritems from sortedcontainers import SortedDict @@ -117,7 +117,7 @@ class FederationRemoteSendQueue(object): user_ids = set( user_id - for uids in itervalues(self.presence_changed) + for uids in self.presence_changed.values() for user_id in uids ) -- cgit 1.5.1 From 14e4d4f4bf894f4e70118e03f4f4575e1eb6dab6 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 31 Aug 2018 00:19:58 +1000 Subject: Port storage/ to Python 3 (#3725) --- changelog.d/3725.misc | 1 + jenkins/prepare_synapse.sh | 2 +- synapse/python_dependencies.py | 3 + synapse/storage/_base.py | 32 ++++++++++- synapse/storage/deviceinbox.py | 2 +- synapse/storage/devices.py | 10 ++-- synapse/storage/end_to_end_keys.py | 6 +- synapse/storage/engines/postgres.py | 9 ++- synapse/storage/events.py | 10 ++-- synapse/storage/events_worker.py | 9 +-- synapse/storage/filtering.py | 6 +- synapse/storage/pusher.py | 14 +++-- synapse/storage/transactions.py | 7 ++- tests/rest/client/v1/utils.py | 14 +++-- tests/storage/test_purge.py | 106 ++++++++++++++++++++++++++++++++++++ tests/unittest.py | 11 ++++ tests/utils.py | 2 + 17 files changed, 208 insertions(+), 36 deletions(-) create mode 100644 changelog.d/3725.misc create mode 100644 tests/storage/test_purge.py (limited to 'synapse') diff --git a/changelog.d/3725.misc b/changelog.d/3725.misc new file mode 100644 index 0000000000..91ab9d7137 --- /dev/null +++ b/changelog.d/3725.misc @@ -0,0 +1 @@ +The synapse.storage module has been ported to Python 3. diff --git a/jenkins/prepare_synapse.sh b/jenkins/prepare_synapse.sh index a30179f2aa..d95ca846c4 100755 --- a/jenkins/prepare_synapse.sh +++ b/jenkins/prepare_synapse.sh @@ -31,5 +31,5 @@ $TOX_BIN/pip install 'setuptools>=18.5' $TOX_BIN/pip install 'pip>=10' { python synapse/python_dependencies.py - echo lxml psycopg2 + echo lxml } | xargs $TOX_BIN/pip install diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 9c55e79ef5..942d7c721f 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -78,6 +78,9 @@ CONDITIONAL_REQUIREMENTS = { "affinity": { "affinity": ["affinity"], }, + "postgres": { + "psycopg2>=2.6": ["psycopg2"] + } } diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 08dffd774f..be61147b9b 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -17,9 +17,10 @@ import sys import threading import time -from six import iteritems, iterkeys, itervalues +from six import PY2, iteritems, iterkeys, itervalues from six.moves import intern, range +from canonicaljson import json from prometheus_client import Histogram from twisted.internet import defer @@ -1216,3 +1217,32 @@ class _RollbackButIsFineException(Exception): something went wrong. """ pass + + +def db_to_json(db_content): + """ + Take some data from a database row and return a JSON-decoded object. + + Args: + db_content (memoryview|buffer|bytes|bytearray|unicode) + """ + # psycopg2 on Python 3 returns memoryview objects, which we need to + # cast to bytes to decode + if isinstance(db_content, memoryview): + db_content = db_content.tobytes() + + # psycopg2 on Python 2 returns buffer objects, which we need to cast to + # bytes to decode + if PY2 and isinstance(db_content, buffer): + db_content = bytes(db_content) + + # Decode it to a Unicode string before feeding it to json.loads, so we + # consistenty get a Unicode-containing object out. 
+ if isinstance(db_content, (bytes, bytearray)): + db_content = db_content.decode('utf8') + + try: + return json.loads(db_content) + except Exception: + logging.warning("Tried to decode '%r' as JSON and failed", db_content) + raise diff --git a/synapse/storage/deviceinbox.py b/synapse/storage/deviceinbox.py index 73646da025..e06b0bc56d 100644 --- a/synapse/storage/deviceinbox.py +++ b/synapse/storage/deviceinbox.py @@ -169,7 +169,7 @@ class DeviceInboxStore(BackgroundUpdateStore): local_by_user_then_device = {} for user_id, messages_by_device in messages_by_user_then_device.items(): messages_json_for_user = {} - devices = messages_by_device.keys() + devices = list(messages_by_device.keys()) if len(devices) == 1 and devices[0] == "*": # Handle wildcard device_ids. sql = ( diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index c0943ecf91..d10ff9e4b9 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -24,7 +24,7 @@ from synapse.api.errors import StoreError from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList -from ._base import Cache, SQLBaseStore +from ._base import Cache, SQLBaseStore, db_to_json logger = logging.getLogger(__name__) @@ -411,7 +411,7 @@ class DeviceStore(SQLBaseStore): if device is not None: key_json = device.get("key_json", None) if key_json: - result["keys"] = json.loads(key_json) + result["keys"] = db_to_json(key_json) device_display_name = device.get("device_display_name", None) if device_display_name: result["device_display_name"] = device_display_name @@ -466,7 +466,7 @@ class DeviceStore(SQLBaseStore): retcol="content", desc="_get_cached_user_device", ) - defer.returnValue(json.loads(content)) + defer.returnValue(db_to_json(content)) @cachedInlineCallbacks() def _get_cached_devices_for_user(self, user_id): @@ -479,7 +479,7 @@ class DeviceStore(SQLBaseStore): desc="_get_cached_devices_for_user", ) defer.returnValue({ - device["device_id"]: json.loads(device["content"]) + device["device_id"]: db_to_json(device["content"]) for device in devices }) @@ -511,7 +511,7 @@ class DeviceStore(SQLBaseStore): key_json = device.get("key_json", None) if key_json: - result["keys"] = json.loads(key_json) + result["keys"] = db_to_json(key_json) device_display_name = device.get("device_display_name", None) if device_display_name: result["device_display_name"] = device_display_name diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py index 523b4360c3..1f1721e820 100644 --- a/synapse/storage/end_to_end_keys.py +++ b/synapse/storage/end_to_end_keys.py @@ -14,13 +14,13 @@ # limitations under the License. 
from six import iteritems -from canonicaljson import encode_canonical_json, json +from canonicaljson import encode_canonical_json from twisted.internet import defer from synapse.util.caches.descriptors import cached -from ._base import SQLBaseStore +from ._base import SQLBaseStore, db_to_json class EndToEndKeyStore(SQLBaseStore): @@ -90,7 +90,7 @@ class EndToEndKeyStore(SQLBaseStore): for user_id, device_keys in iteritems(results): for device_id, device_info in iteritems(device_keys): - device_info["keys"] = json.loads(device_info.pop("key_json")) + device_info["keys"] = db_to_json(device_info.pop("key_json")) defer.returnValue(results) diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index 8a0386c1a4..42225f8a2a 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -41,13 +41,18 @@ class PostgresEngine(object): db_conn.set_isolation_level( self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ ) + + # Set the bytea output to escape, vs the default of hex + cursor = db_conn.cursor() + cursor.execute("SET bytea_output TO escape") + # Asynchronous commit, don't wait for the server to call fsync before # ending the transaction. # https://www.postgresql.org/docs/current/static/wal-async-commit.html if not self.synchronous_commit: - cursor = db_conn.cursor() cursor.execute("SET synchronous_commit TO OFF") - cursor.close() + + cursor.close() def is_deadlock(self, error): if isinstance(error, self.module.DatabaseError): diff --git a/synapse/storage/events.py b/synapse/storage/events.py index f39c8c8461..8bf87f38f7 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -19,7 +19,7 @@ import logging from collections import OrderedDict, deque, namedtuple from functools import wraps -from six import iteritems +from six import iteritems, text_type from six.moves import range from canonicaljson import json @@ -1220,7 +1220,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore "sender": event.sender, "contains_url": ( "url" in event.content - and isinstance(event.content["url"], basestring) + and isinstance(event.content["url"], text_type) ), } for event, _ in events_and_contexts @@ -1529,7 +1529,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore contains_url = "url" in content if contains_url: - contains_url &= isinstance(content["url"], basestring) + contains_url &= isinstance(content["url"], text_type) except (KeyError, AttributeError): # If the event is missing a necessary field then # skip over it. @@ -1910,9 +1910,9 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore (room_id,) ) rows = txn.fetchall() - max_depth = max(row[0] for row in rows) + max_depth = max(row[1] for row in rows) - if max_depth <= token.topological: + if max_depth < token.topological: # We need to ensure we don't delete all the events from the database # otherwise we wouldn't be able to send any events (due to not # having any backwards extremeties) diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 59822178ff..a8326f5296 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ import itertools import logging from collections import namedtuple @@ -265,7 +266,7 @@ class EventsWorkerStore(SQLBaseStore): """ with Measure(self._clock, "_fetch_event_list"): try: - event_id_lists = zip(*event_list)[0] + event_id_lists = list(zip(*event_list))[0] event_ids = [ item for sublist in event_id_lists for item in sublist ] @@ -299,14 +300,14 @@ class EventsWorkerStore(SQLBaseStore): logger.exception("do_fetch") # We only want to resolve deferreds from the main thread - def fire(evs): + def fire(evs, exc): for _, d in evs: if not d.called: with PreserveLoggingContext(): - d.errback(e) + d.errback(exc) with PreserveLoggingContext(): - self.hs.get_reactor().callFromThread(fire, event_list) + self.hs.get_reactor().callFromThread(fire, event_list, e) @defer.inlineCallbacks def _enqueue_events(self, events, check_redacted=True, allow_rejected=False): diff --git a/synapse/storage/filtering.py b/synapse/storage/filtering.py index 2d5896c5b4..6ddcc909bf 100644 --- a/synapse/storage/filtering.py +++ b/synapse/storage/filtering.py @@ -13,14 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from canonicaljson import encode_canonical_json, json +from canonicaljson import encode_canonical_json from twisted.internet import defer from synapse.api.errors import Codes, SynapseError from synapse.util.caches.descriptors import cachedInlineCallbacks -from ._base import SQLBaseStore +from ._base import SQLBaseStore, db_to_json class FilteringStore(SQLBaseStore): @@ -44,7 +44,7 @@ class FilteringStore(SQLBaseStore): desc="get_user_filter", ) - defer.returnValue(json.loads(bytes(def_json).decode("utf-8"))) + defer.returnValue(db_to_json(def_json)) def add_user_filter(self, user_localpart, user_filter): def_json = encode_canonical_json(user_filter) diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index 8443bd4c1b..c7987bfcdd 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -15,7 +15,8 @@ # limitations under the License. 
import logging -import types + +import six from canonicaljson import encode_canonical_json, json @@ -27,6 +28,11 @@ from ._base import SQLBaseStore logger = logging.getLogger(__name__) +if six.PY2: + db_binary_type = buffer +else: + db_binary_type = memoryview + class PusherWorkerStore(SQLBaseStore): def _decode_pushers_rows(self, rows): @@ -34,18 +40,18 @@ class PusherWorkerStore(SQLBaseStore): dataJson = r['data'] r['data'] = None try: - if isinstance(dataJson, types.BufferType): + if isinstance(dataJson, db_binary_type): dataJson = str(dataJson).decode("UTF8") r['data'] = json.loads(dataJson) except Exception as e: logger.warn( "Invalid JSON in data for pusher %d: %s, %s", - r['id'], dataJson, e.message, + r['id'], dataJson, e.args[0], ) pass - if isinstance(r['pushkey'], types.BufferType): + if isinstance(r['pushkey'], db_binary_type): r['pushkey'] = str(r['pushkey']).decode("UTF8") return rows diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index 428e7fa36e..0c42bd3322 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -18,14 +18,14 @@ from collections import namedtuple import six -from canonicaljson import encode_canonical_json, json +from canonicaljson import encode_canonical_json from twisted.internet import defer from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.caches.descriptors import cached -from ._base import SQLBaseStore +from ._base import SQLBaseStore, db_to_json # py2 sqlite has buffer hardcoded as only binary type, so we must use it, # despite being deprecated and removed in favor of memoryview @@ -95,7 +95,8 @@ class TransactionStore(SQLBaseStore): ) if result and result["response_code"]: - return result["response_code"], json.loads(str(result["response_json"])) + return result["response_code"], db_to_json(result["response_json"]) + else: return None diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py index 40dc4ea256..530dc8ba6d 100644 --- a/tests/rest/client/v1/utils.py +++ b/tests/rest/client/v1/utils.py @@ -240,7 +240,6 @@ class RestHelper(object): self.assertEquals(200, code) defer.returnValue(response) - @defer.inlineCallbacks def send(self, room_id, body=None, txn_id=None, tok=None, expect_code=200): if txn_id is None: txn_id = "m%s" % (str(time.time())) @@ -248,9 +247,16 @@ class RestHelper(object): body = "body_text_here" path = "/_matrix/client/r0/rooms/%s/send/m.room.message/%s" % (room_id, txn_id) - content = '{"msgtype":"m.text","body":"%s"}' % body + content = {"msgtype": "m.text", "body": body} if tok: path = path + "?access_token=%s" % tok - (code, response) = yield self.mock_resource.trigger("PUT", path, content) - self.assertEquals(expect_code, code, msg=str(response)) + request, channel = make_request("PUT", path, json.dumps(content).encode('utf8')) + render(request, self.resource, self.hs.get_reactor()) + + assert int(channel.result["code"]) == expect_code, ( + "Expected: %d, got: %d, resp: %r" + % (expect_code, int(channel.result["code"]), channel.result["body"]) + ) + + return channel.json_body diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py new file mode 100644 index 0000000000..f671599cb8 --- /dev/null +++ b/tests/storage/test_purge.py @@ -0,0 +1,106 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.rest.client.v1 import room + +from tests.unittest import HomeserverTestCase + + +class PurgeTests(HomeserverTestCase): + + user_id = "@red:server" + servlets = [room.register_servlets] + + def make_homeserver(self, reactor, clock): + hs = self.setup_test_homeserver("server", http_client=None) + return hs + + def prepare(self, reactor, clock, hs): + self.room_id = self.helper.create_room_as(self.user_id) + + def test_purge(self): + """ + Purging a room will delete everything before the topological point. + """ + # Send four messages to the room + first = self.helper.send(self.room_id, body="test1") + second = self.helper.send(self.room_id, body="test2") + third = self.helper.send(self.room_id, body="test3") + last = self.helper.send(self.room_id, body="test4") + + storage = self.hs.get_datastore() + + # Get the topological token + event = storage.get_topological_token_for_event(last["event_id"]) + self.pump() + event = self.successResultOf(event) + + # Purge everything before this topological token + purge = storage.purge_history(self.room_id, event, True) + self.pump() + self.assertEqual(self.successResultOf(purge), None) + + # Try and get the events + get_first = storage.get_event(first["event_id"]) + get_second = storage.get_event(second["event_id"]) + get_third = storage.get_event(third["event_id"]) + get_last = storage.get_event(last["event_id"]) + self.pump() + + # 1-3 should fail and last will succeed, meaning that 1-3 are deleted + # and last is not. + self.failureResultOf(get_first) + self.failureResultOf(get_second) + self.failureResultOf(get_third) + self.successResultOf(get_last) + + def test_purge_wont_delete_extrems(self): + """ + Purging a room with a topological token ahead of the forward + extremities will refuse to delete anything. + """ + # Send four messages to the room + first = self.helper.send(self.room_id, body="test1") + second = self.helper.send(self.room_id, body="test2") + third = self.helper.send(self.room_id, body="test3") + last = self.helper.send(self.room_id, body="test4") + + storage = self.hs.get_datastore() + + # Set the topological token higher than it should be + event = storage.get_topological_token_for_event(last["event_id"]) + self.pump() + event = self.successResultOf(event) + event = "t{}-{}".format( + *list(map(lambda x: x + 1, map(int, event[1:].split("-")))) + ) + + # Purge everything before this topological token + purge = storage.purge_history(self.room_id, event, True) + self.pump() + f = self.failureResultOf(purge) + self.assertIn("greater than forward", f.value.args[0]) + + # Try and get the events + get_first = storage.get_event(first["event_id"]) + get_second = storage.get_event(second["event_id"]) + get_third = storage.get_event(third["event_id"]) + get_last = storage.get_event(last["event_id"]) + self.pump() + + # Nothing is deleted.
+ self.successResultOf(get_first) + self.successResultOf(get_second) + self.successResultOf(get_third) + self.successResultOf(get_last) diff --git a/tests/unittest.py b/tests/unittest.py index d852e2465a..8b513bb32b 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -151,6 +151,7 @@ class HomeserverTestCase(TestCase): hijack_auth (bool): Whether to hijack auth to return the user specified in user_id. """ + servlets = [] hijack_auth = True @@ -279,3 +280,13 @@ class HomeserverTestCase(TestCase): kwargs = dict(kwargs) kwargs.update(self._hs_args) return setup_test_homeserver(self.addCleanup, *args, **kwargs) + + def pump(self): + """ + Pump the reactor enough that Deferreds will fire. + """ + self.reactor.pump([0.0] * 100) + + def get_success(self, d): + self.pump() + return self.successResultOf(d) diff --git a/tests/utils.py b/tests/utils.py index 657c258a6e..179b592501 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -147,6 +147,8 @@ def setup_test_homeserver( config.max_mau_value = 50 config.mau_limits_reserved_threepids = [] config.admin_contact = None + config.rc_messages_per_second = 10000 + config.rc_message_burst_count = 10000 # we need a sane default_room_version, otherwise attempts to create rooms will # fail. -- cgit 1.5.1 From ea068d6f3cd5ed1bc9a39b2fd43e19d6d40f18da Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Fri, 31 Aug 2018 10:49:14 +0100 Subject: fix bug where reserved threepid user comes to sign up and server is mau blocked --- synapse/api/auth.py | 10 +++++++++- synapse/handlers/register.py | 3 ++- synapse/rest/client/v1_only/register.py | 6 +++++- synapse/rest/client/v2_alpha/register.py | 5 +++++ tests/api/test_auth.py | 17 +++++++++++++++++ 5 files changed, 38 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 4ca40a0f71..8d2aa5870a 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -775,7 +775,7 @@ class Auth(object): ) @defer.inlineCallbacks - def check_auth_blocking(self, user_id=None): + def check_auth_blocking(self, user_id=None, threepid=None): """Checks if the user should be rejected for some external reason, such as monthly active user limiting or global disable flag @@ -806,6 +806,14 @@ class Auth(object): is_trial = yield self.store.is_trial_user(user_id) if is_trial: return + elif threepid: + # If the user does not exist yet, but is signing up with a + # reserved threepid then pass auth check + for tp in self.hs.config.mau_limits_reserved_threepids: + if (threepid['medium'] == tp['medium'] + and threepid['address'] == tp['address']): + return + # Else if there is no room in the MAU bucket, bail current_mau = yield self.store.get_monthly_active_count() if current_mau >= self.hs.config.max_mau_value: diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index f03ee1476b..1e53f2c635 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -125,6 +125,7 @@ class RegistrationHandler(BaseHandler): guest_access_token=None, make_guest=False, admin=False, + threepid=None, ): """Registers a new client on the server. @@ -145,7 +146,7 @@ class RegistrationHandler(BaseHandler): RegistrationError if there was a problem registering.
""" - yield self.auth.check_auth_blocking() + yield self.auth.check_auth_blocking(threepid=threepid) password_hash = None if password: password_hash = yield self.auth_handler().hash(password) diff --git a/synapse/rest/client/v1_only/register.py b/synapse/rest/client/v1_only/register.py index 5e99cffbcb..2c7bbcb171 100644 --- a/synapse/rest/client/v1_only/register.py +++ b/synapse/rest/client/v1_only/register.py @@ -281,11 +281,15 @@ class RegisterRestServlet(ClientV1RestServlet): register_json["user"].encode("utf-8") if "user" in register_json else None ) + threepid = None + if session[LoginType.EMAIL_IDENTITY]: + threepid = session["threepidCreds"] handler = self.handlers.registration_handler (user_id, token) = yield handler.register( localpart=desired_user_id, - password=password + password=password, + threepid=threepid, ) if session[LoginType.EMAIL_IDENTITY]: diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 2f64155d13..45113e5386 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -395,11 +395,16 @@ class RegisterRestServlet(RestServlet): if desired_username is not None: desired_username = desired_username.lower() + threepid = None + if auth_result: + threepid = auth_result.get(LoginType.EMAIL_IDENTITY) + (registered_user_id, _) = yield self.registration_handler.register( localpart=desired_username, password=new_password, guest_access_token=guest_access_token, generate_token=False, + threepid=threepid, ) # remember that we've now registered that user account, and with diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index 54e396d19d..f65a27e5f1 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -467,6 +467,23 @@ class AuthTestCase(unittest.TestCase): ) yield self.auth.check_auth_blocking() + @defer.inlineCallbacks + def test_reserved_threepid(self): + self.hs.config.limit_usage_by_mau = True + self.hs.config.max_mau_value = 1 + threepid = {'medium': 'email', 'address': 'reserved@server.com'} + unknown_threepid = {'medium': 'email', 'address': 'unreserved@server.com'} + self.hs.config.mau_limits_reserved_threepids = [threepid] + + yield self.store.register(user_id='user1', token="123", password_hash=None) + with self.assertRaises(ResourceLimitError): + yield self.auth.check_auth_blocking() + + with self.assertRaises(ResourceLimitError): + yield self.auth.check_auth_blocking(threepid=unknown_threepid) + + yield self.auth.check_auth_blocking(threepid=threepid) + @defer.inlineCallbacks def test_hs_disabled(self): self.hs.config.hs_disabled = True -- cgit 1.5.1 From 09f3cf1a7ef0c533d052a5c87257503b710093c6 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Fri, 31 Aug 2018 15:42:51 +0100 Subject: ensure post registration auth checks do not fail erroneously --- synapse/api/auth.py | 7 ++----- synapse/rest/client/v1_only/register.py | 4 ++++ synapse/rest/client/v2_alpha/register.py | 4 ++++ synapse/storage/monthly_active_users.py | 15 ++++++++++++++- 4 files changed, 24 insertions(+), 6 deletions(-) (limited to 'synapse') diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 9c207b9537..6a97c06110 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -809,11 +809,8 @@ class Auth(object): elif threepid: # If the user does not exist yet, but is signing up with a # reserved threepid then pass auth check - for tp in self.hs.config.mau_limits_reserved_threepids: - if (threepid['medium'] == tp['medium'] - and threepid['address'] == tp['address']): - 
return - + if is_threepid_reserved(threepid): + return # Else if there is no room in the MAU bucket, bail current_mau = yield self.store.get_monthly_active_count() if current_mau >= self.hs.config.max_mau_value: diff --git a/synapse/rest/client/v1_only/register.py b/synapse/rest/client/v1_only/register.py index 2c7bbcb171..95873e03d5 100644 --- a/synapse/rest/client/v1_only/register.py +++ b/synapse/rest/client/v1_only/register.py @@ -291,6 +291,10 @@ class RegisterRestServlet(ClientV1RestServlet): password=password, threepid=threepid, ) + # Necessary due to auth checks prior to the threepid being + # written to the db + if self.store.is_threepid_reserved(threepid): + self.store.upsert_monthly_active_user(registered_user_id) if session[LoginType.EMAIL_IDENTITY]: logger.debug("Binding emails %s to %s" % ( diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 45113e5386..f22b7577ea 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -406,6 +406,10 @@ class RegisterRestServlet(RestServlet): generate_token=False, threepid=threepid, ) + # Necessary due to auth checks prior to the threepid being + # written to the db + if self.store.is_threepid_reserved(threepid): + self.store.upsert_monthly_active_user(registered_user_id) # remember that we've now registered that user account, and with # what user ID (since the user may not have specified) diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index d178f5c5ba..173867c4b1 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -36,7 +36,6 @@ class MonthlyActiveUsersStore(SQLBaseStore): @defer.inlineCallbacks def initialise_reserved_users(self, threepids): - # TODO Why can't I do this in init? 
store = self.hs.get_datastore() reserved_user_list = [] @@ -220,3 +219,17 @@ class MonthlyActiveUsersStore(SQLBaseStore): yield self.upsert_monthly_active_user(user_id) elif now - last_seen_timestamp > LAST_SEEN_GRANULARITY: yield self.upsert_monthly_active_user(user_id) + + def is_threepid_reserved(self, threepid): + """Check the threepid against the reserved threepid config + Args: + threepid(dict) - The threepid to test for + Returns: + bool - whether the threepid under test is a reserved threepid + """ + for tp in self.hs.config.mau_limits_reserved_threepids: + if (threepid['medium'] == tp['medium'] + and threepid['address'] == tp['address']): + return True + else: + return False -- cgit 1.5.1
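The reserved-threepid test added above is an exact match on both medium and address against the configured list, with no normalisation or pattern matching. A minimal illustration of that behaviour, using invented config values rather than anything from these patches:

reserved = [{"medium": "email", "address": "reserved@example.com"}]

def is_threepid_reserved(threepid):
    # mirrors the store helper above: plain equality on both fields
    for tp in reserved:
        if threepid["medium"] == tp["medium"] and threepid["address"] == tp["address"]:
            return True
    return False

assert is_threepid_reserved({"medium": "email", "address": "reserved@example.com"})
# Differing case means no match, since the comparison is exact:
assert not is_threepid_reserved({"medium": "email", "address": "Reserved@example.com"})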
""" # Never fail an auth check for the server notices users @@ -797,6 +803,10 @@ class Auth(object): limit_type=self.hs.config.hs_disabled_limit_type ) if self.hs.config.limit_usage_by_mau is True: + + if user_id and threepid: + logger.warn("Called with both user_id and threepid, this shoudn't happen") + # If the user is already part of the MAU cohort or a trial user if user_id: timestamp = yield self.store.user_last_seen_monthly_active(user_id) @@ -809,14 +819,13 @@ class Auth(object): elif threepid: # If the user does not exist yet, but is signing up with a # reserved threepid then pass auth check - if self.store.is_threepid_reserved(threepid): + if is_threepid_reserved(self.hs.config, threepid): return # Else if there is no room in the MAU bucket, bail current_mau = yield self.store.get_monthly_active_count() if current_mau >= self.hs.config.max_mau_value: raise ResourceLimitError( 403, "Monthly Active User Limit Exceeded", - admin_contact=self.hs.config.admin_contact, errcode=Codes.RESOURCE_LIMIT_EXCEEDED, limit_type="monthly_active_user" diff --git a/synapse/config/server.py b/synapse/config/server.py index 2faf472189..c1c7c0105e 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -404,6 +404,23 @@ class ServerConfig(Config): " service on the given port.") +def is_threepid_reserved(config, threepid): + """Check the threepid against the reserved threepid config + Args: + config(ServerConfig) - to access server config attributes + threepid(dict) - The threepid to test for + + Returns: + boolean Is the threepid undertest reserved_user + """ + + for tp in config.mau_limits_reserved_threepids: + if (threepid['medium'] == tp['medium'] + and threepid['address'] == tp['address']): + return True + return False + + def read_gc_thresholds(thresholds): """Reads the three integer thresholds for garbage collection. Ensures that the thresholds are integers if thresholds are supplied. 
diff --git a/synapse/rest/client/v1_only/register.py b/synapse/rest/client/v1_only/register.py index 95873e03d5..dadb376b02 100644 --- a/synapse/rest/client/v1_only/register.py +++ b/synapse/rest/client/v1_only/register.py @@ -23,6 +23,7 @@ from twisted.internet import defer import synapse.util.stringutils as stringutils from synapse.api.constants import LoginType from synapse.api.errors import Codes, SynapseError +from synapse.config.server import is_threepid_reserved from synapse.http.servlet import assert_params_in_dict, parse_json_object_from_request from synapse.rest.client.v1.base import ClientV1RestServlet from synapse.types import create_requester @@ -282,7 +283,7 @@ class RegisterRestServlet(ClientV1RestServlet): if "user" in register_json else None ) threepid = None - if session[LoginType.EMAIL_IDENTITY]: + if session.get(LoginType.EMAIL_IDENTITY): threepid = session["threepidCreds"] handler = self.handlers.registration_handler @@ -293,8 +294,8 @@ class RegisterRestServlet(ClientV1RestServlet): ) # Necessary due to auth checks prior to the threepid being # written to the db - if self.store.is_threepid_reserved(threepid): - self.store.upsert_monthly_active_user(registered_user_id) + if is_threepid_reserved(self.hs.config, threepid): + yield self.store.upsert_monthly_active_user(user_id) if session[LoginType.EMAIL_IDENTITY]: logger.debug("Binding emails %s to %s" % ( diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index f22b7577ea..2fb4d43ccb 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -26,6 +26,7 @@ import synapse import synapse.types from synapse.api.constants import LoginType from synapse.api.errors import Codes, SynapseError, UnrecognizedRequestError +from synapse.config.server import is_threepid_reserved from synapse.http.servlet import ( RestServlet, assert_params_in_dict, @@ -408,8 +409,8 @@ class RegisterRestServlet(RestServlet): ) # Necessary due to auth checks prior to the threepid being # written to the db - if self.store.is_threepid_reserved(threepid): - self.store.upsert_monthly_active_user(registered_user_id) + if is_threepid_reserved(self.hs.config, threepid): + yield self.store.upsert_monthly_active_user(registered_user_id) # remember that we've now registered that user account, and with # what user ID (since the user may not have specified) diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index 173867c4b1..c7899d7fd2 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -219,17 +219,3 @@ class MonthlyActiveUsersStore(SQLBaseStore): yield self.upsert_monthly_active_user(user_id) elif now - last_seen_timestamp > LAST_SEEN_GRANULARITY: yield self.upsert_monthly_active_user(user_id) - - def is_threepid_reserved(self, threepid): - """Check the threepid against the reserved threepid config - Args: - threepid(dict): The threepid to test for - Returns: - boolean: whether the threepid under test is a reserved user - """ - for tp in self.hs.config.mau_limits_reserved_threepids: - if (threepid['medium'] == tp['medium'] - and threepid['address'] == tp['address']): - return True - else: - return False diff --git a/tests/utils.py b/tests/utils.py index 179b592501..63e30dc6c0 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -26,6 +26,7 @@ from twisted.internet import defer, reactor from synapse.api.constants import EventTypes from synapse.api.errors import CodeMessageException, cs_error
+from synapse.config.server import ServerConfig from synapse.federation.transport import server from synapse.http.server import HttpServer from synapse.server import HomeServer @@ -158,6 +159,11 @@ def setup_test_homeserver( # background, which upsets the test runner. config.update_user_directory = False + def is_threepid_reserved(threepid): + return ServerConfig.is_threepid_reserved(config, threepid) + + config.is_threepid_reserved.side_effect = is_threepid_reserved + config.use_frozen_dicts = True config.ldap_enabled = False -- cgit 1.5.1 From 301cb60d0b2de55f7feac24a043b2624ad3c8733 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Fri, 31 Aug 2018 17:29:35 +0100 Subject: assert rather than warn --- synapse/api/auth.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/api/auth.py b/synapse/api/auth.py index a89687f420..34382e4e3c 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -783,6 +783,7 @@ class Auth(object): Args: user_id(str|None): If present, checks for presence against existing MAU cohort + threepid(dict|None): If present, checks for presence against configured reserved threepid. Used in cases where the user is trying to register with a MAU-blocked server; normally they would be rejected, but their @@ -803,9 +804,7 @@ class Auth(object): limit_type=self.hs.config.hs_disabled_limit_type ) if self.hs.config.limit_usage_by_mau is True: - - if user_id and threepid: - logger.warn("Called with both user_id and threepid, this shouldn't happen") + assert not (user_id and threepid) # If the user is already part of the MAU cohort or a trial user if user_id: -- cgit 1.5.1 From b13836da7f5951bfe3973819b5affa12805185cf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Sep 2018 17:22:49 +0100 Subject: Remove conn_id from repl prometheus metrics `conn_id` gets set to a random string, and so we end up filling up prometheus with tonnes of data series, which is bad.
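In the Prometheus data model each distinct combination of label values becomes its own time series, so labelling a metric with a value that is random per connection creates an unbounded number of series. The toy sketch below reproduces the blow-up with the plain prometheus_client API; the metric and label names are made up for illustration and are not the ones used by synapse:

# Sketch only: every fresh conn_id value registers a brand-new time series.
import random

from prometheus_client import Gauge

pending_commands = Gauge(
    "example_pending_commands",
    "pending commands per replication connection",
    ["name", "conn_id"],
)

for _ in range(3):
    conn_id = "%x" % random.getrandbits(32)  # random per-connection id
    pending_commands.labels(name="client", conn_id=conn_id).set(0)

# Three connections have produced three distinct series; dropping the
# conn_id label, as this commit does, keeps the series count bounded.
num_series = sum(len(metric.samples) for metric in pending_commands.collect())
print(num_series)  # prints 3, one series per distinct conn_id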
--- synapse/replication/tcp/protocol.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'synapse') diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 74e892c104..0d28152703 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -607,9 +607,9 @@ def transport_buffer_size(protocol): transport_send_buffer = LaterGauge( "synapse_replication_tcp_protocol_transport_send_buffer", "", - ["name", "conn_id"], + ["name"], lambda: { - (p.name, p.conn_id): transport_buffer_size(p) for p in connected_connections + (p.name,): transport_buffer_size(p) for p in connected_connections }, ) @@ -632,9 +632,9 @@ def transport_kernel_read_buffer_size(protocol, read=True): tcp_transport_kernel_send_buffer = LaterGauge( "synapse_replication_tcp_protocol_transport_kernel_send_buffer", "", - ["name", "conn_id"], + ["name"], lambda: { - (p.name, p.conn_id): transport_kernel_read_buffer_size(p, False) + (p.name,): transport_kernel_read_buffer_size(p, False) for p in connected_connections }, ) @@ -643,9 +643,9 @@ tcp_transport_kernel_send_buffer = LaterGauge( tcp_transport_kernel_read_buffer = LaterGauge( "synapse_replication_tcp_protocol_transport_kernel_read_buffer", "", - ["name", "conn_id"], + ["name"], lambda: { - (p.name, p.conn_id): transport_kernel_read_buffer_size(p, True) + (p.name,): transport_kernel_read_buffer_size(p, True) for p in connected_connections }, ) @@ -654,9 +654,9 @@ tcp_transport_kernel_read_buffer = LaterGauge( tcp_inbound_commands = LaterGauge( "synapse_replication_tcp_protocol_inbound_commands", "", - ["command", "name", "conn_id"], + ["command", "name"], lambda: { - (k[0], p.name, p.conn_id): count + (k[0], p.name,): count for p in connected_connections for k, count in iteritems(p.inbound_commands_counter) }, @@ -665,9 +665,9 @@ tcp_inbound_commands = LaterGauge( tcp_outbound_commands = LaterGauge( "synapse_replication_tcp_protocol_outbound_commands", "", - ["command", "name", "conn_id"], + ["command", "name"], lambda: { - (k[0], p.name, p.conn_id): count + (k[0], p.name,): count for p in connected_connections for k, count in iteritems(p.outbound_commands_counter) }, -- cgit 1.5.1 From 3e242dc14976bf455717c5f631a997a2c0e5f2c4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 4 Sep 2018 11:45:52 +0100 Subject: Remove conn_id --- synapse/replication/tcp/protocol.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 0d28152703..5dc7b3fffc 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -590,9 +590,9 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol): pending_commands = LaterGauge( "synapse_replication_tcp_protocol_pending_commands", "", - ["name", "conn_id"], + ["name"], lambda: { - (p.name, p.conn_id): len(p.pending_commands) for p in connected_connections + (p.name,): len(p.pending_commands) for p in connected_connections }, ) -- cgit 1.5.1 From c42f7fd7b9f99923d71ad064fff7d75a5fc3cd93 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Tue, 4 Sep 2018 12:03:17 +0100 Subject: improve human readable error messages --- synapse/rest/client/v2_alpha/account.py | 16 ++++++++++++---- synapse/rest/client/v2_alpha/register.py | 11 ++++++++--- 2 files changed, 20 insertions(+), 7 deletions(-) (limited to 'synapse') diff --git a/synapse/rest/client/v2_alpha/account.py 
b/synapse/rest/client/v2_alpha/account.py index 372648cafd..37b32dd37b 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -53,7 +53,9 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): if not check_3pid_allowed(self.hs, "email", body['email']): raise SynapseError( - 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, + 403, + "Your email domain is not authorized on this server", + Codes.THREEPID_DENIED, ) existingUid = yield self.hs.get_datastore().get_user_id_by_threepid( @@ -89,7 +91,9 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet): if not check_3pid_allowed(self.hs, "msisdn", msisdn): raise SynapseError( - 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, + 403, + "Account phone numbers are not authorized on this server", + Codes.THREEPID_DENIED, ) existingUid = yield self.datastore.get_user_id_by_threepid( @@ -241,7 +245,9 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): if not check_3pid_allowed(self.hs, "email", body['email']): raise SynapseError( - 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, + 403, + "Your email domain is not authorized on this server", + Codes.THREEPID_DENIED, ) existingUid = yield self.datastore.get_user_id_by_threepid( @@ -276,7 +282,9 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): if not check_3pid_allowed(self.hs, "msisdn", msisdn): raise SynapseError( - 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, + 403, + "Account phone numbers are not authorized on this server", + Codes.THREEPID_DENIED, ) existingUid = yield self.datastore.get_user_id_by_threepid( diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 2fb4d43ccb..200b29fa38 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -75,7 +75,9 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): if not check_3pid_allowed(self.hs, "email", body['email']): raise SynapseError( - 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, + 403, + "Your email domain is not authorized to register on this server", + Codes.THREEPID_DENIED, ) existingUid = yield self.hs.get_datastore().get_user_id_by_threepid( @@ -115,7 +117,9 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet): if not check_3pid_allowed(self.hs, "msisdn", msisdn): raise SynapseError( - 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, + 403, + "Phone numbers are not authorized to register on this server", + Codes.THREEPID_DENIED, ) existingUid = yield self.hs.get_datastore().get_user_id_by_threepid( @@ -373,7 +377,8 @@ class RegisterRestServlet(RestServlet): if not check_3pid_allowed(self.hs, medium, address): raise SynapseError( - 403, "Third party identifier is not allowed", + 403, + "Third party identifier (email/phone number) is not allowed", Codes.THREEPID_DENIED, ) -- cgit 1.5.1 From bae37cd811fd71c68d28eb300eb0b74a5b6a1c27 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Tue, 4 Sep 2018 12:07:00 +0100 Subject: improve human readable error message --- synapse/rest/client/v2_alpha/register.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 200b29fa38..192f52e462 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -378,7 +378,8 @@ class 
RegisterRestServlet(RestServlet): if not check_3pid_allowed(self.hs, medium, address): raise SynapseError( 403, - "Third party identifier (email/phone number) is not allowed", + "Third party identifiers (email/phone numbers)" + + " are not authorized on this server", Codes.THREEPID_DENIED, ) -- cgit 1.5.1 From 7e9ced417846307d88075ed34d7764717a32def0 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 4 Sep 2018 21:12:04 +1000 Subject: version and towncrier --- CHANGES.md | 44 ++++++++++++++++++++++++++++++++++++++++++++ changelog.d/3378.misc | 1 - changelog.d/3659.feature | 1 - changelog.d/3673.misc | 1 - changelog.d/3680.feature | 1 - changelog.d/3722.bugfix | 1 - changelog.d/3724.feature | 1 - changelog.d/3725.misc | 1 - changelog.d/3726.misc | 1 - changelog.d/3727.misc | 1 - changelog.d/3730.misc | 1 - changelog.d/3734.misc | 1 - changelog.d/3735.misc | 1 - changelog.d/3737.misc | 1 - changelog.d/3740.misc | 1 - changelog.d/3746.misc | 1 - changelog.d/3747.bugfix | 1 - changelog.d/3749.feature | 1 - changelog.d/3751.feature | 1 - changelog.d/3753.bugfix | 1 - changelog.d/3754.bugfix | 1 - changelog.d/3755.bugfix | 1 - changelog.d/3756.bugfix | 1 - changelog.d/3758.bugfix | 1 - changelog.d/3760.bugfix | 1 - changelog.d/3764.misc | 1 - changelog.d/3768.bugfix | 1 - changelog.d/3777.bugfix | 1 - synapse/__init__.py | 2 +- 29 files changed, 45 insertions(+), 28 deletions(-) delete mode 100644 changelog.d/3378.misc delete mode 100644 changelog.d/3659.feature delete mode 100644 changelog.d/3673.misc delete mode 100644 changelog.d/3680.feature delete mode 100644 changelog.d/3722.bugfix delete mode 100644 changelog.d/3724.feature delete mode 100644 changelog.d/3725.misc delete mode 100644 changelog.d/3726.misc delete mode 100644 changelog.d/3727.misc delete mode 100644 changelog.d/3730.misc delete mode 100644 changelog.d/3734.misc delete mode 100644 changelog.d/3735.misc delete mode 100644 changelog.d/3737.misc delete mode 100644 changelog.d/3740.misc delete mode 100644 changelog.d/3746.misc delete mode 100644 changelog.d/3747.bugfix delete mode 100644 changelog.d/3749.feature delete mode 100644 changelog.d/3751.feature delete mode 100644 changelog.d/3753.bugfix delete mode 100644 changelog.d/3754.bugfix delete mode 100644 changelog.d/3755.bugfix delete mode 100644 changelog.d/3756.bugfix delete mode 100644 changelog.d/3758.bugfix delete mode 100644 changelog.d/3760.bugfix delete mode 100644 changelog.d/3764.misc delete mode 100644 changelog.d/3768.bugfix delete mode 100644 changelog.d/3777.bugfix (limited to 'synapse') diff --git a/CHANGES.md b/CHANGES.md index a35f5aebc7..c2b1e70488 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,47 @@ +Synapse 0.33.4rc1 (2018-09-04) +============================== + +Features +-------- + +- Support profile API endpoints on workers ([\#3659](https://github.com/matrix-org/synapse/issues/3659)) +- Server notices for resource limit blocking ([\#3680](https://github.com/matrix-org/synapse/issues/3680)) +- Allow guests to use /rooms/:roomId/event/:eventId ([\#3724](https://github.com/matrix-org/synapse/issues/3724)) +- Add mau_trial_days config param, so that users only get counted as MAU after N days. ([\#3749](https://github.com/matrix-org/synapse/issues/3749)) +- Require twisted 17.1 or later (fixes [#3741](https://github.com/matrix-org/synapse/issues/3741)). 
([\#3751](https://github.com/matrix-org/synapse/issues/3751)) + + +Bugfixes +-------- + +- Fix error collecting prometheus metrics when run on dedicated thread due to threading concurrency issues ([\#3722](https://github.com/matrix-org/synapse/issues/3722)) +- Fix bug where we resent "limit exceeded" server notices repeatedly ([\#3747](https://github.com/matrix-org/synapse/issues/3747)) +- Fix bug where we broke sync when using limit_usage_by_mau but hadn't configured server notices ([\#3753](https://github.com/matrix-org/synapse/issues/3753)) +- Fix 'federation_domain_whitelist' such that an empty list correctly blocks all outbound federation traffic ([\#3754](https://github.com/matrix-org/synapse/issues/3754)) +- Fix tagging of server notice rooms ([\#3755](https://github.com/matrix-org/synapse/issues/3755), [\#3756](https://github.com/matrix-org/synapse/issues/3756)) +- Fix 'admin_uri' config variable and error parameter to be 'admin_contact' to match the spec. ([\#3758](https://github.com/matrix-org/synapse/issues/3758)) +- Don't return non-LL-member state in incremental sync state blocks ([\#3760](https://github.com/matrix-org/synapse/issues/3760)) +- Fix bug in sending presence over federation ([\#3768](https://github.com/matrix-org/synapse/issues/3768)) +- Fix bug where preserved threepid user comes to sign up and server is mau blocked ([\#3777](https://github.com/matrix-org/synapse/issues/3777)) + + +Internal Changes +---------------- + +- Removed the link to the unmaintained matrix-synapse-auto-deploy project from the readme. ([\#3378](https://github.com/matrix-org/synapse/issues/3378)) +- Refactor state module to support multiple room versions ([\#3673](https://github.com/matrix-org/synapse/issues/3673)) +- The synapse.storage module has been ported to Python 3. ([\#3725](https://github.com/matrix-org/synapse/issues/3725)) +- Split the state_group_cache into member and non-member state events (and so speed up LL /sync) ([\#3726](https://github.com/matrix-org/synapse/issues/3726)) +- Log failure to authenticate remote servers as warnings (without stack traces) ([\#3727](https://github.com/matrix-org/synapse/issues/3727)) +- The CONTRIBUTING guidelines have been updated to mention our use of Markdown and that .misc files have content. ([\#3730](https://github.com/matrix-org/synapse/issues/3730)) +- Reference the need for an HTTP replication port when using the federation_reader worker ([\#3734](https://github.com/matrix-org/synapse/issues/3734)) +- Fix minor spelling error in federation client documentation. ([\#3735](https://github.com/matrix-org/synapse/issues/3735)) +- Remove redundant state resolution function ([\#3737](https://github.com/matrix-org/synapse/issues/3737)) +- The test suite now passes on PostgreSQL. ([\#3740](https://github.com/matrix-org/synapse/issues/3740)) +- Fix MAU cache invalidation due to missing yield ([\#3746](https://github.com/matrix-org/synapse/issues/3746)) +- Make sure that we close db connections opened during init ([\#3764](https://github.com/matrix-org/synapse/issues/3764)) + + Synapse 0.33.3 (2018-08-22) =========================== diff --git a/changelog.d/3378.misc b/changelog.d/3378.misc deleted file mode 100644 index 8f88f88e69..0000000000 --- a/changelog.d/3378.misc +++ /dev/null @@ -1 +0,0 @@ -Removed the link to the unmaintained matrix-synapse-auto-deploy project from the readme. 
diff --git a/changelog.d/3659.feature b/changelog.d/3659.feature deleted file mode 100644 index a5b4821c09..0000000000 --- a/changelog.d/3659.feature +++ /dev/null @@ -1 +0,0 @@ -Support profile API endpoints on workers diff --git a/changelog.d/3673.misc b/changelog.d/3673.misc deleted file mode 100644 index d672111fb9..0000000000 --- a/changelog.d/3673.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor state module to support multiple room versions diff --git a/changelog.d/3680.feature b/changelog.d/3680.feature deleted file mode 100644 index 4edaaf76a8..0000000000 --- a/changelog.d/3680.feature +++ /dev/null @@ -1 +0,0 @@ -Server notices for resource limit blocking diff --git a/changelog.d/3722.bugfix b/changelog.d/3722.bugfix deleted file mode 100644 index 16cbaf76cb..0000000000 --- a/changelog.d/3722.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix error collecting prometheus metrics when run on dedicated thread due to threading concurrency issues diff --git a/changelog.d/3724.feature b/changelog.d/3724.feature deleted file mode 100644 index 1b374ccf47..0000000000 --- a/changelog.d/3724.feature +++ /dev/null @@ -1 +0,0 @@ -Allow guests to use /rooms/:roomId/event/:eventId diff --git a/changelog.d/3725.misc b/changelog.d/3725.misc deleted file mode 100644 index 91ab9d7137..0000000000 --- a/changelog.d/3725.misc +++ /dev/null @@ -1 +0,0 @@ -The synapse.storage module has been ported to Python 3. diff --git a/changelog.d/3726.misc b/changelog.d/3726.misc deleted file mode 100644 index c4f66ec998..0000000000 --- a/changelog.d/3726.misc +++ /dev/null @@ -1 +0,0 @@ -Split the state_group_cache into member and non-member state events (and so speed up LL /sync) diff --git a/changelog.d/3727.misc b/changelog.d/3727.misc deleted file mode 100644 index 0b83220d90..0000000000 --- a/changelog.d/3727.misc +++ /dev/null @@ -1 +0,0 @@ -Log failure to authenticate remote servers as warnings (without stack traces) diff --git a/changelog.d/3730.misc b/changelog.d/3730.misc deleted file mode 100644 index b1ea84f732..0000000000 --- a/changelog.d/3730.misc +++ /dev/null @@ -1 +0,0 @@ -The CONTRIBUTING guidelines have been updated to mention our use of Markdown and that .misc files have content. diff --git a/changelog.d/3734.misc b/changelog.d/3734.misc deleted file mode 100644 index 4f6e4b3848..0000000000 --- a/changelog.d/3734.misc +++ /dev/null @@ -1 +0,0 @@ -Reference the need for an HTTP replication port when using the federation_reader worker diff --git a/changelog.d/3735.misc b/changelog.d/3735.misc deleted file mode 100644 index f17004be71..0000000000 --- a/changelog.d/3735.misc +++ /dev/null @@ -1 +0,0 @@ -Fix minor spelling error in federation client documentation. diff --git a/changelog.d/3737.misc b/changelog.d/3737.misc deleted file mode 100644 index 0361da4ebc..0000000000 --- a/changelog.d/3737.misc +++ /dev/null @@ -1 +0,0 @@ -Remove redundant state resolution function diff --git a/changelog.d/3740.misc b/changelog.d/3740.misc deleted file mode 100644 index 4dcb7fb5de..0000000000 --- a/changelog.d/3740.misc +++ /dev/null @@ -1 +0,0 @@ -The test suite now passes on PostgreSQL. 
diff --git a/changelog.d/3746.misc b/changelog.d/3746.misc deleted file mode 100644 index fc00ee773a..0000000000 --- a/changelog.d/3746.misc +++ /dev/null @@ -1 +0,0 @@ -Fix MAU cache invalidation due to missing yield diff --git a/changelog.d/3747.bugfix b/changelog.d/3747.bugfix deleted file mode 100644 index c41e2a1213..0000000000 --- a/changelog.d/3747.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we resent "limit exceeded" server notices repeatedly diff --git a/changelog.d/3749.feature b/changelog.d/3749.feature deleted file mode 100644 index 9f8837b106..0000000000 --- a/changelog.d/3749.feature +++ /dev/null @@ -1 +0,0 @@ -Add mau_trial_days config param, so that users only get counted as MAU after N days. diff --git a/changelog.d/3751.feature b/changelog.d/3751.feature deleted file mode 100644 index dc9742b15b..0000000000 --- a/changelog.d/3751.feature +++ /dev/null @@ -1 +0,0 @@ -Require twisted 17.1 or later (fixes [#3741](https://github.com/matrix-org/synapse/issues/3741)). diff --git a/changelog.d/3753.bugfix b/changelog.d/3753.bugfix deleted file mode 100644 index b4301267df..0000000000 --- a/changelog.d/3753.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we broke sync when using limit_usage_by_mau but hadn't configured server notices diff --git a/changelog.d/3754.bugfix b/changelog.d/3754.bugfix deleted file mode 100644 index 6e3ec80194..0000000000 --- a/changelog.d/3754.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix 'federation_domain_whitelist' such that an empty list correctly blocks all outbound federation traffic diff --git a/changelog.d/3755.bugfix b/changelog.d/3755.bugfix deleted file mode 100644 index 6a1f83f0ce..0000000000 --- a/changelog.d/3755.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix tagging of server notice rooms diff --git a/changelog.d/3756.bugfix b/changelog.d/3756.bugfix deleted file mode 100644 index 6a1f83f0ce..0000000000 --- a/changelog.d/3756.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix tagging of server notice rooms diff --git a/changelog.d/3758.bugfix b/changelog.d/3758.bugfix deleted file mode 100644 index 862739bfe8..0000000000 --- a/changelog.d/3758.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix 'admin_uri' config variable and error parameter to be 'admin_contact' to match the spec. diff --git a/changelog.d/3760.bugfix b/changelog.d/3760.bugfix deleted file mode 100644 index ce61fb8a2b..0000000000 --- a/changelog.d/3760.bugfix +++ /dev/null @@ -1 +0,0 @@ -Don't return non-LL-member state in incremental sync state blocks diff --git a/changelog.d/3764.misc b/changelog.d/3764.misc deleted file mode 100644 index f3614f1982..0000000000 --- a/changelog.d/3764.misc +++ /dev/null @@ -1 +0,0 @@ -Make sure that we close db connections opened during init \ No newline at end of file diff --git a/changelog.d/3768.bugfix b/changelog.d/3768.bugfix deleted file mode 100644 index a039a7fa68..0000000000 --- a/changelog.d/3768.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug in sending presence over federation diff --git a/changelog.d/3777.bugfix b/changelog.d/3777.bugfix deleted file mode 100644 index 46efc543a7..0000000000 --- a/changelog.d/3777.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where preserved threepid user comes to sign up and server is mau blocked diff --git a/synapse/__init__.py b/synapse/__init__.py index e62901b761..2c42893673 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -17,4 +17,4 @@ """ This is a reference implementation of a Matrix home server. 
""" -__version__ = "0.33.3" +__version__ = "0.33.4rc1" -- cgit 1.5.1 From 87c18d12ee30c84a108ac686454720d6ac6a2f84 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 4 Sep 2018 15:18:25 +0100 Subject: Implement 'event_format' filter param in /sync This has been specced and part-implemented; let's implement it for /sync (but no other endpoints yet :/). --- synapse/api/filtering.py | 1 + synapse/rest/client/v2_alpha/sync.py | 51 +++++++++++++++++++++++++++--------- 2 files changed, 39 insertions(+), 13 deletions(-) (limited to 'synapse') diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 186831e118..a31a9a17e0 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -251,6 +251,7 @@ class FilterCollection(object): "include_leave", False ) self.event_fields = filter_json.get("event_fields", []) + self.event_format = filter_json.get("event_format", "client") def __repr__(self): return "" % (json.dumps(self._filter_json),) diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 1275baa1ba..263d8eb73e 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -25,6 +25,7 @@ from synapse.api.errors import SynapseError from synapse.api.filtering import DEFAULT_FILTER_COLLECTION, FilterCollection from synapse.events.utils import ( format_event_for_client_v2_without_room_id, + format_event_raw, serialize_event, ) from synapse.handlers.presence import format_user_presence_state @@ -175,17 +176,28 @@ class SyncRestServlet(RestServlet): @staticmethod def encode_response(time_now, sync_result, access_token_id, filter): + if filter.event_format == 'client': + event_formatter = format_event_for_client_v2_without_room_id + elif filter.event_format == 'federation': + event_formatter = format_event_raw + else: + raise Exception("Unknown event format %s" % (filter.event_format, )) + joined = SyncRestServlet.encode_joined( - sync_result.joined, time_now, access_token_id, filter.event_fields + sync_result.joined, time_now, access_token_id, + filter.event_fields, + event_formatter, ) invited = SyncRestServlet.encode_invited( sync_result.invited, time_now, access_token_id, + event_formatter, ) archived = SyncRestServlet.encode_archived( sync_result.archived, time_now, access_token_id, filter.event_fields, + event_formatter, ) return { @@ -228,7 +240,7 @@ class SyncRestServlet(RestServlet): } @staticmethod - def encode_joined(rooms, time_now, token_id, event_fields): + def encode_joined(rooms, time_now, token_id, event_fields, event_formatter): """ Encode the joined rooms in a sync result @@ -240,7 +252,9 @@ class SyncRestServlet(RestServlet): token_id(int): ID of the user's auth token - used for namespacing of transaction IDs event_fields(list): List of event fields to include. If empty, - all fields will be returned. + all fields will be returned. 
+ event_formatter (func[dict]): function to convert from federation format + to client format Returns: dict[str, dict[str, object]]: the joined rooms list, in our response format @@ -248,13 +262,14 @@ class SyncRestServlet(RestServlet): joined = {} for room in rooms: joined[room.room_id] = SyncRestServlet.encode_room( - room, time_now, token_id, only_fields=event_fields + room, time_now, token_id, joined=True, only_fields=event_fields, + event_formatter=event_formatter, ) return joined @staticmethod - def encode_invited(rooms, time_now, token_id): + def encode_invited(rooms, time_now, token_id, event_formatter): """ Encode the invited rooms in a sync result @@ -264,7 +279,9 @@ class SyncRestServlet(RestServlet): time_now(int): current time - used as a baseline for age calculations token_id(int): ID of the user's auth token - used for namespacing - of transaction IDs + of transaction IDs + event_formatter (func[dict]): function to convert from federation format + to client format Returns: dict[str, dict[str, object]]: the invited rooms list, in our @@ -274,7 +291,7 @@ class SyncRestServlet(RestServlet): for room in rooms: invite = serialize_event( room.invite, time_now, token_id=token_id, - event_format=format_event_for_client_v2_without_room_id, + event_format=event_formatter, is_invite=True, ) unsigned = dict(invite.get("unsigned", {})) @@ -288,7 +305,7 @@ class SyncRestServlet(RestServlet): return invited @staticmethod - def encode_archived(rooms, time_now, token_id, event_fields): + def encode_archived(rooms, time_now, token_id, event_fields, event_formatter): """ Encode the archived rooms in a sync result @@ -300,7 +317,9 @@ class SyncRestServlet(RestServlet): token_id(int): ID of the user's auth token - used for namespacing of transaction IDs event_fields(list): List of event fields to include. If empty, - all fields will be returned. + all fields will be returned. + event_formatter (func[dict]): function to convert from federation format + to client format Returns: dict[str, dict[str, object]]: The invited rooms list, in our response format @@ -308,13 +327,18 @@ class SyncRestServlet(RestServlet): joined = {} for room in rooms: joined[room.room_id] = SyncRestServlet.encode_room( - room, time_now, token_id, joined=False, only_fields=event_fields + room, time_now, token_id, joined=False, + only_fields=event_fields, + event_formatter=event_formatter, ) return joined @staticmethod - def encode_room(room, time_now, token_id, joined=True, only_fields=None): + def encode_room( + room, time_now, token_id, joined, + only_fields, event_formatter, + ): """ Args: room (JoinedSyncResult|ArchivedSyncResult): sync result for a @@ -326,14 +350,15 @@ class SyncRestServlet(RestServlet): joined (bool): True if the user is joined to this room - will mean we handle ephemeral events only_fields(list): Optional. The list of event fields to include. + event_formatter (func[dict]): function to convert from federation format + to client format Returns: dict[str, object]: the room, encoded in our response format """ def serialize(event): - # TODO(mjark): Respect formatting requirements in the filter. 
return serialize_event( event, time_now, token_id=token_id, - event_format=format_event_for_client_v2_without_room_id, + event_format=event_formatter, only_event_fields=only_fields, ) -- cgit 1.5.1 From 5f02017aead05adf127b89d7e203849982bd7fdc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 Sep 2018 10:20:40 +0100 Subject: Improve performance of getting typing updates for replication Fetching the list of all new typing notifications involved iterating over all rooms and comparing their serials. Let's move to using a stream change cache, like we do for other streams. --- synapse/handlers/typing.py | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 2d2d3d5a0d..65f475d639 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -20,6 +20,7 @@ from twisted.internet import defer from synapse.api.errors import AuthError, SynapseError from synapse.types import UserID, get_domain_from_id +from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.logcontext import run_in_background from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer @@ -68,6 +69,11 @@ class TypingHandler(object): # map room IDs to sets of users currently typing self._room_typing = {} + # caches which room_ids changed at which serials + self._typing_stream_change_cache = StreamChangeCache( + "TypingStreamChangeCache", self._latest_room_serial, + ) + self.clock.looping_call( self._handle_timeouts, 5000, ) @@ -274,19 +280,29 @@ class TypingHandler(object): self._latest_room_serial += 1 self._room_serials[member.room_id] = self._latest_room_serial + self._typing_stream_change_cache.entity_has_changed( + member.room_id, self._latest_room_serial, + ) self.notifier.on_new_event( "typing_key", self._latest_room_serial, rooms=[member.room_id] ) def get_all_typing_updates(self, last_id, current_id): - # TODO: Work out a way to do this without scanning the entire state. if last_id == current_id: return [] + changed_rooms = self._typing_stream_change_cache.get_all_entities_changed( + last_id, + ) + + if changed_rooms is None: + changed_rooms = self._room_serials + rows = [] - for room_id, serial in self._room_serials.items(): - if last_id < serial and serial <= current_id: + for room_id in changed_rooms: + serial = self._room_serials[room_id] + if last_id < serial <= current_id: typing = self._room_typing[room_id] rows.append((serial, room_id, list(typing))) rows.sort() -- cgit 1.5.1 From 804dd41e18c449e711e443398b95c9f6c68b6fa2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 4 Sep 2018 01:09:12 +0100 Subject: Check that signatures on events are valid We should check that both the sender's server, and the server which created the event_id (which may be different from whatever the remote server has told us the origin is), have signed the event. --- synapse/federation/federation_base.py | 126 +++++++++++++++++++++++++++++----- 1 file changed, 110 insertions(+), 16 deletions(-) (limited to 'synapse') diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index c11798093d..5be8e66fb8 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -13,17 +13,20 @@ # See the License for the specific language governing permissions and # limitations under the License.
import logging +from collections import namedtuple import six from twisted.internet import defer +from twisted.internet.defer import DeferredList -from synapse.api.constants import MAX_DEPTH +from synapse.api.constants import MAX_DEPTH, EventTypes, Membership from synapse.api.errors import Codes, SynapseError from synapse.crypto.event_signing import check_event_content_hash from synapse.events import FrozenEvent from synapse.events.utils import prune_event from synapse.http.servlet import assert_params_in_dict +from synapse.types import get_domain_from_id from synapse.util import logcontext, unwrapFirstError logger = logging.getLogger(__name__) @@ -133,34 +136,25 @@ class FederationBase(object): * throws a SynapseError if the signature check failed. The deferreds run their callbacks in the sentinel logcontext. """ - - redacted_pdus = [ - prune_event(pdu) - for pdu in pdus - ] - - deferreds = self.keyring.verify_json_objects_for_server([ - (p.origin, p.get_pdu_json()) - for p in redacted_pdus - ]) + deferreds = _check_sigs_on_pdus(self.keyring, pdus) ctx = logcontext.LoggingContext.current_context() - def callback(_, pdu, redacted): + def callback(_, pdu): with logcontext.PreserveLoggingContext(ctx): if not check_event_content_hash(pdu): logger.warn( "Event content has been tampered, redacting %s: %s", pdu.event_id, pdu.get_pdu_json() ) - return redacted + return prune_event(pdu) if self.spam_checker.check_event_for_spam(pdu): logger.warn( "Event contains spam, redacting %s: %s", pdu.event_id, pdu.get_pdu_json() ) - return redacted + return prune_event(pdu) return pdu @@ -173,16 +167,116 @@ class FederationBase(object): ) return failure - for deferred, pdu, redacted in zip(deferreds, pdus, redacted_pdus): + for deferred, pdu in zip(deferreds, pdus): deferred.addCallbacks( callback, errback, - callbackArgs=[pdu, redacted], + callbackArgs=[pdu], errbackArgs=[pdu], ) return deferreds +class PduToCheckSig(namedtuple("PduToCheckSig", [ + "pdu", "redacted_pdu_json", "event_id_domain", "sender_domain", "deferreds", +])): + pass + + +def _check_sigs_on_pdus(keyring, pdus): + """Check that the given events are correctly signed + + Args: + keyring (synapse.crypto.Keyring): keyring object to do the checks + pdus (Collection[EventBase]): the events to be checked + + Returns: + List[Deferred]: a Deferred for each event in pdus, which will either succeed if + the signatures are valid, or fail (with a SynapseError) if not. + """ + + # (currently this is written assuming the v1 room structure; we'll probably want a + # separate function for checking v2 rooms) + + # we want to check that the event is signed by: + # + # (a) the server which created the event_id + # + # (b) the sender's server. + # + # - except in the case of invites created from a 3pid invite, which are exempt + # from this check, because the sender has to match that of the original 3pid + # invite, but the event may come from a different HS, for reasons that I don't + # entirely grok (why do the senders have to match? and if they do, why doesn't the + # joining server ask the inviting server to do the switcheroo with + # exchange_third_party_invite?). + # + # That's pretty awful, since redacting such an invite will render it invalid + # (because it will then look like a regular invite without a valid signature), + # and signatures are *supposed* to be valid whether or not an event has been + # redacted. But this isn't the worst of the ways that 3pid invites are broken. 
+ # + # let's start by getting the domain for each pdu, and flattening the event back + # to JSON. + pdus_to_check = [ + PduToCheckSig( + pdu=p, + redacted_pdu_json=prune_event(p).get_pdu_json(), + event_id_domain=get_domain_from_id(p.event_id), + sender_domain=get_domain_from_id(p.sender), + deferreds=[], + ) + for p in pdus + ] + + # first make sure that the event is signed by the event_id's domain + deferreds = keyring.verify_json_objects_for_server([ + (p.event_id_domain, p.redacted_pdu_json) + for p in pdus_to_check + ]) + + for p, d in zip(pdus_to_check, deferreds): + p.deferreds.append(d) + + # now let's look for events where the sender's domain is different to the + # event id's domain (normally only the case for joins/leaves), and add additional + # checks. + pdus_to_check_sender = [ + p for p in pdus_to_check + if p.sender_domain != p.event_id_domain and not _is_invite_via_3pid(p.pdu) + ] + + more_deferreds = keyring.verify_json_objects_for_server([ + (p.sender_domain, p.redacted_pdu_json) + for p in pdus_to_check_sender + ]) + + for p, d in zip(pdus_to_check_sender, more_deferreds): + p.deferreds.append(d) + + # replace lists of deferreds with single Deferreds + return [_flatten_deferred_list(p.deferreds) for p in pdus_to_check] + + +def _flatten_deferred_list(deferreds): + """Given a list of one or more deferreds, either return the single deferred, or + combine into a DeferredList. + """ + if len(deferreds) > 1: + return DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True) + else: + assert len(deferreds) == 1 + return deferreds[0] + + +def _is_invite_via_3pid(event): + return ( + event.type == EventTypes.Member + and event.membership == Membership.INVITE + and "third_party_invite" in event.content + ) + + def event_from_pdu_json(pdu_json, outlier=False): """Construct a FrozenEvent from an event json received over federation -- cgit 1.5.1 From c127c8d0421f0228a46ebbe280c9537e8d8ea42b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 4 Sep 2018 01:23:18 +0100 Subject: Fix origin handling for pushed transactions Use the actual origin for push transactions, rather than whatever the remote server claimed. --- synapse/federation/federation_server.py | 20 ++++++++++---------- synapse/federation/persistence.py | 8 ++++---- synapse/federation/transport/server.py | 2 +- tests/handlers/test_typing.py | 19 ++++++++----------- tests/utils.py | 12 +++++++++--- 5 files changed, 32 insertions(+), 29 deletions(-) (limited to 'synapse') diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 3e0cd294a1..547c6aec80 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -99,7 +99,7 @@ class FederationServer(FederationBase): @defer.inlineCallbacks @log_function - def on_incoming_transaction(self, transaction_data): + def on_incoming_transaction(self, origin, transaction_data): # keep this as early as possible to make the calculated origin ts as # accurate as possible. request_time = self._clock.time_msec() @@ -108,34 +108,33 @@ class FederationServer(FederationBase): if not transaction.transaction_id: raise Exception("Transaction missing transaction_id") - if not transaction.origin: - raise Exception("Transaction missing origin") logger.debug("[%s] Got transaction", transaction.transaction_id) # use a linearizer to ensure that we don't process the same transaction # multiple times in parallel. 
with (yield self._transaction_linearizer.queue( - (transaction.origin, transaction.transaction_id), + (origin, transaction.transaction_id), )): result = yield self._handle_incoming_transaction( - transaction, request_time, + origin, transaction, request_time, ) defer.returnValue(result) @defer.inlineCallbacks - def _handle_incoming_transaction(self, transaction, request_time): + def _handle_incoming_transaction(self, origin, transaction, request_time): """ Process an incoming transaction and return the HTTP response Args: + origin (unicode): the server making the request transaction (Transaction): incoming transaction request_time (int): timestamp that the HTTP request arrived at Returns: Deferred[(int, object)]: http response code and body """ - response = yield self.transaction_actions.have_responded(transaction) + response = yield self.transaction_actions.have_responded(origin, transaction) if response: logger.debug( @@ -149,7 +148,7 @@ class FederationServer(FederationBase): received_pdus_counter.inc(len(transaction.pdus)) - origin_host, _ = parse_server_name(transaction.origin) + origin_host, _ = parse_server_name(origin) pdus_by_room = {} @@ -190,7 +189,7 @@ class FederationServer(FederationBase): event_id = pdu.event_id try: yield self._handle_received_pdu( - transaction.origin, pdu + origin, pdu ) pdu_results[event_id] = {} except FederationError as e: @@ -212,7 +211,7 @@ class FederationServer(FederationBase): if hasattr(transaction, "edus"): for edu in (Edu(**x) for x in transaction.edus): yield self.received_edu( - transaction.origin, + origin, edu.edu_type, edu.content ) @@ -224,6 +223,7 @@ class FederationServer(FederationBase): logger.debug("Returning: %s", str(response)) yield self.transaction_actions.set_response( + origin, transaction, 200, response ) diff --git a/synapse/federation/persistence.py b/synapse/federation/persistence.py index 9146215c21..74ffd13b4f 100644 --- a/synapse/federation/persistence.py +++ b/synapse/federation/persistence.py @@ -36,7 +36,7 @@ class TransactionActions(object): self.store = datastore @log_function - def have_responded(self, transaction): + def have_responded(self, origin, transaction): """ Have we already responded to a transaction with the same id and origin? @@ -50,11 +50,11 @@ class TransactionActions(object): "transaction_id") return self.store.get_received_txn_response( - transaction.transaction_id, transaction.origin + transaction.transaction_id, origin ) @log_function - def set_response(self, transaction, code, response): + def set_response(self, origin, transaction, code, response): """ Persist how we responded to a transaction. 
Returns: @@ -66,7 +66,7 @@ class TransactionActions(object): return self.store.set_received_txn_response( transaction.transaction_id, - transaction.origin, + origin, code, response, ) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 77969a4f38..8cde9716ac 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -353,7 +353,7 @@ class FederationSendServlet(BaseFederationServlet): try: code, response = yield self.handler.on_incoming_transaction( - transaction_data + origin, transaction_data, ) except Exception: logger.exception("on_incoming_transaction failed") diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index ad58073a14..c2d951b45f 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -33,7 +33,7 @@ from ..utils import ( ) -def _expect_edu(destination, edu_type, content, origin="test"): +def _expect_edu_transaction(edu_type, content, origin="test"): return { "origin": origin, "origin_server_ts": 1000000, @@ -42,8 +42,8 @@ def _expect_edu(destination, edu_type, content, origin="test"): } -def _make_edu_json(origin, edu_type, content): - return json.dumps(_expect_edu("test", edu_type, content, origin=origin)).encode( +def _make_edu_transaction_json(edu_type, content): + return json.dumps(_expect_edu_transaction(edu_type, content)).encode( 'utf8' ) @@ -190,8 +190,7 @@ class TypingNotificationsTestCase(unittest.TestCase): call( "farm", path="/_matrix/federation/v1/send/1000000/", - data=_expect_edu( - "farm", + data=_expect_edu_transaction( "m.typing", content={ "room_id": self.room_id, @@ -221,11 +220,10 @@ class TypingNotificationsTestCase(unittest.TestCase): self.assertEquals(self.event_source.get_current_key(), 0) - yield self.mock_federation_resource.trigger( + (code, response) = yield self.mock_federation_resource.trigger( "PUT", "/_matrix/federation/v1/send/1000000/", - _make_edu_json( - "farm", + _make_edu_transaction_json( "m.typing", content={ "room_id": self.room_id, @@ -233,7 +231,7 @@ class TypingNotificationsTestCase(unittest.TestCase): "typing": True, }, ), - federation_auth=True, + federation_auth_origin=b'farm', ) self.on_new_event.assert_has_calls( @@ -264,8 +262,7 @@ class TypingNotificationsTestCase(unittest.TestCase): call( "farm", path="/_matrix/federation/v1/send/1000000/", - data=_expect_edu( - "farm", + data=_expect_edu_transaction( "m.typing", content={ "room_id": self.room_id, diff --git a/tests/utils.py b/tests/utils.py index bb0fc74054..8de2898b2f 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -306,7 +306,10 @@ class MockHttpResource(HttpServer): @patch('twisted.web.http.Request') @defer.inlineCallbacks - def trigger(self, http_method, path, content, mock_request, federation_auth=False): + def trigger( + self, http_method, path, content, mock_request, + federation_auth_origin=None, + ): """ Fire an HTTP event. Args: @@ -315,6 +318,7 @@ class MockHttpResource(HttpServer): content : The HTTP body mock_request : Mocked request to pass to the event so it can get content. 
+ federation_auth_origin (bytes|None): domain to authenticate as, for federation Returns: A tuple of (code, response) Raises: @@ -335,8 +339,10 @@ class MockHttpResource(HttpServer): mock_request.getClientIP.return_value = "-" headers = {} - if federation_auth: - headers[b"Authorization"] = [b"X-Matrix origin=test,key=,sig="] + if federation_auth_origin is not None: + headers[b"Authorization"] = [ + b"X-Matrix origin=%s,key=,sig=" % (federation_auth_origin, ) + ] mock_request.requestHeaders.getRawHeaders = mock_getRawHeaders(headers) # return the right path if the event requires it -- cgit 1.5.1 From 2d2828dcbc2c6360d28a64d3849cf849eb5348c4 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 6 Sep 2018 00:10:47 +1000 Subject: Port http/ to Python 3 (#3771) --- changelog.d/3771.misc | 1 + setup.cfg | 5 +- synapse/appservice/api.py | 13 +- synapse/federation/federation_server.py | 10 +- synapse/http/client.py | 82 +++++-------- synapse/http/matrixfederationclient.py | 204 +++++++++++++------------------- synapse/http/site.py | 4 +- synapse/python_dependencies.py | 1 + 8 files changed, 134 insertions(+), 186 deletions(-) create mode 100644 changelog.d/3771.misc (limited to 'synapse') diff --git a/changelog.d/3771.misc b/changelog.d/3771.misc new file mode 100644 index 0000000000..47aa34bc04 --- /dev/null +++ b/changelog.d/3771.misc @@ -0,0 +1 @@ +http/ is now ported to Python 3. diff --git a/setup.cfg b/setup.cfg index c2620be6c5..52feaa9cc7 100644 --- a/setup.cfg +++ b/setup.cfg @@ -17,13 +17,14 @@ ignore = [pep8] max-line-length = 90 # W503 requires that binary operators be at the end, not start, of lines. Erik -# doesn't like it. E203 is contrary to PEP8. -ignore = W503,E203 +# doesn't like it. E203 is contrary to PEP8. E731 is silly. +ignore = W503,E203,E731 [flake8] # note that flake8 inherits the "ignore" settings from "pep8" (because it uses # pep8 to do those checks), but not the "max-line-length" setting max-line-length = 90 +ignore=W503,E203,E731 [isort] line_length = 89 diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 6980e5890e..9ccc5a80fc 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -13,7 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -import urllib + +from six.moves import urllib from prometheus_client import Counter @@ -98,7 +99,7 @@ class ApplicationServiceApi(SimpleHttpClient): def query_user(self, service, user_id): if service.url is None: defer.returnValue(False) - uri = service.url + ("/users/%s" % urllib.quote(user_id)) + uri = service.url + ("/users/%s" % urllib.parse.quote(user_id)) response = None try: response = yield self.get_json(uri, { @@ -119,7 +120,7 @@ class ApplicationServiceApi(SimpleHttpClient): def query_alias(self, service, alias): if service.url is None: defer.returnValue(False) - uri = service.url + ("/rooms/%s" % urllib.quote(alias)) + uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias)) response = None try: response = yield self.get_json(uri, { @@ -153,7 +154,7 @@ class ApplicationServiceApi(SimpleHttpClient): service.url, APP_SERVICE_PREFIX, kind, - urllib.quote(protocol) + urllib.parse.quote(protocol) ) try: response = yield self.get_json(uri, fields) @@ -188,7 +189,7 @@ class ApplicationServiceApi(SimpleHttpClient): uri = "%s%s/thirdparty/protocol/%s" % ( service.url, APP_SERVICE_PREFIX, - urllib.quote(protocol) + urllib.parse.quote(protocol) ) try: info = yield self.get_json(uri, {}) @@ -228,7 +229,7 @@ class ApplicationServiceApi(SimpleHttpClient): txn_id = str(txn_id) uri = service.url + ("/transactions/%s" % - urllib.quote(txn_id)) + urllib.parse.quote(txn_id)) try: yield self.put_json( uri=uri, diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 3e0cd294a1..6e52c4b6b5 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -838,9 +838,9 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry): ) return self._send_edu( - edu_type=edu_type, - origin=origin, - content=content, + edu_type=edu_type, + origin=origin, + content=content, ) def on_query(self, query_type, args): @@ -851,6 +851,6 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry): return handler(args) return self._get_query_client( - query_type=query_type, - args=args, + query_type=query_type, + args=args, ) diff --git a/synapse/http/client.py b/synapse/http/client.py index ab4fbf59b2..4ba54fed05 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -13,24 +13,25 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ import logging -import urllib -from six import StringIO +from six import text_type +from six.moves import urllib +import treq from canonicaljson import encode_canonical_json, json from prometheus_client import Counter from OpenSSL import SSL from OpenSSL.SSL import VERIFY_NONE -from twisted.internet import defer, protocol, reactor, ssl, task +from twisted.internet import defer, protocol, reactor, ssl from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS from twisted.web._newclient import ResponseDone from twisted.web.client import ( Agent, BrowserLikeRedirectAgent, ContentDecoderAgent, - FileBodyProducer as TwistedFileBodyProducer, GzipDecoder, HTTPConnectionPool, PartialDownloadError, @@ -83,18 +84,20 @@ class SimpleHttpClient(object): if hs.config.user_agent_suffix: self.user_agent = "%s %s" % (self.user_agent, hs.config.user_agent_suffix,) + self.user_agent = self.user_agent.encode('ascii') + @defer.inlineCallbacks - def request(self, method, uri, *args, **kwargs): + def request(self, method, uri, data=b'', headers=None): # A small wrapper around self.agent.request() so we can easily attach # counters to it outgoing_requests_counter.labels(method).inc() # log request but strip `access_token` (AS requests for example include this) - logger.info("Sending request %s %s", method, redact_uri(uri)) + logger.info("Sending request %s %s", method, redact_uri(uri.encode('ascii'))) try: - request_deferred = self.agent.request( - method, uri, *args, **kwargs + request_deferred = treq.request( + method, uri, agent=self.agent, data=data, headers=headers ) add_timeout_to_deferred( request_deferred, 60, self.hs.get_reactor(), @@ -105,14 +108,14 @@ class SimpleHttpClient(object): incoming_responses_counter.labels(method, response.code).inc() logger.info( "Received response to %s %s: %s", - method, redact_uri(uri), response.code + method, redact_uri(uri.encode('ascii')), response.code ) defer.returnValue(response) except Exception as e: incoming_responses_counter.labels(method, "ERR").inc() logger.info( "Error sending request to %s %s: %s %s", - method, redact_uri(uri), type(e).__name__, e.message + method, redact_uri(uri.encode('ascii')), type(e).__name__, e.args[0] ) raise @@ -137,7 +140,8 @@ class SimpleHttpClient(object): # TODO: Do we ever want to log message contents? 
logger.debug("post_urlencoded_get_json args: %s", args) - query_bytes = urllib.urlencode(encode_urlencode_args(args), True) + query_bytes = urllib.parse.urlencode( + encode_urlencode_args(args), True).encode("utf8") actual_headers = { b"Content-Type": [b"application/x-www-form-urlencoded"], @@ -148,15 +152,14 @@ class SimpleHttpClient(object): response = yield self.request( "POST", - uri.encode("ascii"), + uri, headers=Headers(actual_headers), - bodyProducer=FileBodyProducer(StringIO(query_bytes)) + data=query_bytes ) - body = yield make_deferred_yieldable(readBody(response)) - if 200 <= response.code < 300: - defer.returnValue(json.loads(body)) + body = yield make_deferred_yieldable(treq.json_content(response)) + defer.returnValue(body) else: raise HttpResponseException(response.code, response.phrase, body) @@ -191,9 +194,9 @@ class SimpleHttpClient(object): response = yield self.request( "POST", - uri.encode("ascii"), + uri, headers=Headers(actual_headers), - bodyProducer=FileBodyProducer(StringIO(json_str)) + data=json_str ) body = yield make_deferred_yieldable(readBody(response)) @@ -248,7 +251,7 @@ class SimpleHttpClient(object): ValueError: if the response was not JSON """ if len(args): - query_bytes = urllib.urlencode(args, True) + query_bytes = urllib.parse.urlencode(args, True) uri = "%s?%s" % (uri, query_bytes) json_str = encode_canonical_json(json_body) @@ -262,9 +265,9 @@ class SimpleHttpClient(object): response = yield self.request( "PUT", - uri.encode("ascii"), + uri, headers=Headers(actual_headers), - bodyProducer=FileBodyProducer(StringIO(json_str)) + data=json_str ) body = yield make_deferred_yieldable(readBody(response)) @@ -293,7 +296,7 @@ class SimpleHttpClient(object): HttpResponseException on a non-2xx HTTP response. """ if len(args): - query_bytes = urllib.urlencode(args, True) + query_bytes = urllib.parse.urlencode(args, True) uri = "%s?%s" % (uri, query_bytes) actual_headers = { @@ -304,7 +307,7 @@ class SimpleHttpClient(object): response = yield self.request( "GET", - uri.encode("ascii"), + uri, headers=Headers(actual_headers), ) @@ -339,7 +342,7 @@ class SimpleHttpClient(object): response = yield self.request( "GET", - url.encode("ascii"), + url, headers=Headers(actual_headers), ) @@ -434,12 +437,12 @@ class CaptchaServerHttpClient(SimpleHttpClient): @defer.inlineCallbacks def post_urlencoded_get_raw(self, url, args={}): - query_bytes = urllib.urlencode(encode_urlencode_args(args), True) + query_bytes = urllib.parse.urlencode(encode_urlencode_args(args), True) response = yield self.request( "POST", - url.encode("ascii"), - bodyProducer=FileBodyProducer(StringIO(query_bytes)), + url, + data=query_bytes, headers=Headers({ b"Content-Type": [b"application/x-www-form-urlencoded"], b"User-Agent": [self.user_agent], @@ -510,7 +513,7 @@ def encode_urlencode_args(args): def encode_urlencode_arg(arg): - if isinstance(arg, unicode): + if isinstance(arg, text_type): return arg.encode('utf-8') elif isinstance(arg, list): return [encode_urlencode_arg(i) for i in arg] @@ -542,26 +545,3 @@ class InsecureInterceptableContextFactory(ssl.ContextFactory): def creatorForNetloc(self, hostname, port): return self - - -class FileBodyProducer(TwistedFileBodyProducer): - """Workaround for https://twistedmatrix.com/trac/ticket/8473 - - We override the pauseProducing and resumeProducing methods in twisted's - FileBodyProducer so that they do not raise exceptions if the task has - already completed. 
- """ - - def pauseProducing(self): - try: - super(FileBodyProducer, self).pauseProducing() - except task.TaskDone: - # task has already completed - pass - - def resumeProducing(self): - try: - super(FileBodyProducer, self).resumeProducing() - except task.NotPaused: - # task was not paused (probably because it had already completed) - pass diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index b34bb8e31a..6a1fc8ca55 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -17,19 +17,19 @@ import cgi import logging import random import sys -import urllib -from six import string_types -from six.moves.urllib import parse as urlparse +from six import PY3, string_types +from six.moves import urllib -from canonicaljson import encode_canonical_json, json +import treq +from canonicaljson import encode_canonical_json from prometheus_client import Counter from signedjson.sign import sign_json from twisted.internet import defer, protocol, reactor from twisted.internet.error import DNSLookupError from twisted.web._newclient import ResponseDone -from twisted.web.client import Agent, HTTPConnectionPool, readBody +from twisted.web.client import Agent, HTTPConnectionPool from twisted.web.http_headers import Headers import synapse.metrics @@ -58,13 +58,18 @@ incoming_responses_counter = Counter("synapse_http_matrixfederationclient_respon MAX_LONG_RETRIES = 10 MAX_SHORT_RETRIES = 3 +if PY3: + MAXINT = sys.maxsize +else: + MAXINT = sys.maxint + class MatrixFederationEndpointFactory(object): def __init__(self, hs): self.tls_client_options_factory = hs.tls_client_options_factory def endpointForURI(self, uri): - destination = uri.netloc + destination = uri.netloc.decode('ascii') return matrix_federation_endpoint( reactor, destination, timeout=10, @@ -93,26 +98,32 @@ class MatrixFederationHttpClient(object): ) self.clock = hs.get_clock() self._store = hs.get_datastore() - self.version_string = hs.version_string + self.version_string = hs.version_string.encode('ascii') self._next_id = 1 def _create_url(self, destination, path_bytes, param_bytes, query_bytes): - return urlparse.urlunparse( - ("matrix", destination, path_bytes, param_bytes, query_bytes, "") + return urllib.parse.urlunparse( + (b"matrix", destination, path_bytes, param_bytes, query_bytes, b"") ) @defer.inlineCallbacks def _request(self, destination, method, path, - body_callback, headers_dict={}, param_bytes=b"", - query_bytes=b"", retry_on_dns_fail=True, + json=None, json_callback=None, + param_bytes=b"", + query=None, retry_on_dns_fail=True, timeout=None, long_retries=False, ignore_backoff=False, backoff_on_404=False): - """ Creates and sends a request to the given server + """ + Creates and sends a request to the given server. + Args: destination (str): The remote server to send the HTTP request to. method (str): HTTP method path (str): The HTTP path + json (dict or None): JSON to send in the body. + json_callback (func or None): A callback to generate the JSON. + query (dict or None): Query arguments. ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. 
backoff_on_404 (bool): Back off if we get a 404 @@ -146,22 +157,29 @@ class MatrixFederationHttpClient(object): ignore_backoff=ignore_backoff, ) - destination = destination.encode("ascii") + headers_dict = {} path_bytes = path.encode("ascii") - with limiter: - headers_dict[b"User-Agent"] = [self.version_string] - headers_dict[b"Host"] = [destination] + if query: + query_bytes = encode_query_args(query) + else: + query_bytes = b"" - url_bytes = self._create_url( - destination, path_bytes, param_bytes, query_bytes - ) + headers_dict = { + "User-Agent": [self.version_string], + "Host": [destination], + } + + with limiter: + url = self._create_url( + destination.encode("ascii"), path_bytes, param_bytes, query_bytes + ).decode('ascii') txn_id = "%s-O-%s" % (method, self._next_id) - self._next_id = (self._next_id + 1) % (sys.maxint - 1) + self._next_id = (self._next_id + 1) % (MAXINT - 1) outbound_logger.info( "{%s} [%s] Sending request: %s %s", - txn_id, destination, method, url_bytes + txn_id, destination, method, url ) # XXX: Would be much nicer to retry only at the transaction-layer @@ -171,23 +189,33 @@ class MatrixFederationHttpClient(object): else: retries_left = MAX_SHORT_RETRIES - http_url_bytes = urlparse.urlunparse( - ("", "", path_bytes, param_bytes, query_bytes, "") - ) + http_url = urllib.parse.urlunparse( + (b"", b"", path_bytes, param_bytes, query_bytes, b"") + ).decode('ascii') log_result = None try: while True: - producer = None - if body_callback: - producer = body_callback(method, http_url_bytes, headers_dict) - try: - request_deferred = self.agent.request( + if json_callback: + json = json_callback() + + if json: + data = encode_canonical_json(json) + headers_dict["Content-Type"] = ["application/json"] + self.sign_request( + destination, method, http_url, headers_dict, json + ) + else: + data = None + self.sign_request(destination, method, http_url, headers_dict) + + request_deferred = treq.request( method, - url_bytes, - Headers(headers_dict), - producer + url, + headers=Headers(headers_dict), + data=data, + agent=self.agent, ) add_timeout_to_deferred( request_deferred, @@ -218,7 +246,7 @@ class MatrixFederationHttpClient(object): txn_id, destination, method, - url_bytes, + url, _flatten_response_never_received(e), ) @@ -252,7 +280,7 @@ class MatrixFederationHttpClient(object): # :'( # Update transactions table? 
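# ---------------------------------------------------------------------------
# [Editor's note] A standalone sketch of the MAXINT shim introduced above:
# Python 3 has no sys.maxint, so the outbound transaction-id counter wraps on
# sys.maxsize instead. The helper below is illustrative, not synapse's code.
import sys

if sys.version_info[0] >= 3:
    MAXINT = sys.maxsize
else:
    MAXINT = sys.maxint  # only exists on Python 2

_next_id = 1

def next_txn_id(method):
    # Mirrors the "%s-O-%s" transaction ids built in _request(), with the
    # counter wrapping long before it can overflow.
    global _next_id
    txn_id = "%s-O-%s" % (method, _next_id)
    _next_id = (_next_id + 1) % (MAXINT - 1)
    return txn_id
# ---------------------------------------------------------------------------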
with logcontext.PreserveLoggingContext(): - body = yield readBody(response) + body = yield treq.content(response) raise HttpResponseException( response.code, response.phrase, body ) @@ -297,11 +325,11 @@ class MatrixFederationHttpClient(object): auth_headers = [] for key, sig in request["signatures"][self.server_name].items(): - auth_headers.append(bytes( + auth_headers.append(( "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % ( self.server_name, key, sig, - ) - )) + )).encode('ascii') + ) headers_dict[b"Authorization"] = auth_headers @@ -347,24 +375,14 @@ class MatrixFederationHttpClient(object): """ if not json_data_callback: - def json_data_callback(): - return data - - def body_callback(method, url_bytes, headers_dict): - json_data = json_data_callback() - self.sign_request( - destination, method, url_bytes, headers_dict, json_data - ) - producer = _JsonProducer(json_data) - return producer + json_data_callback = lambda: data response = yield self._request( destination, "PUT", path, - body_callback=body_callback, - headers_dict={"Content-Type": ["application/json"]}, - query_bytes=encode_query_args(args), + json_callback=json_data_callback, + query=args, long_retries=long_retries, timeout=timeout, ignore_backoff=ignore_backoff, @@ -376,8 +394,8 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield readBody(response) - defer.returnValue(json.loads(body)) + body = yield treq.json_content(response) + defer.returnValue(body) @defer.inlineCallbacks def post_json(self, destination, path, data={}, long_retries=False, @@ -410,20 +428,12 @@ class MatrixFederationHttpClient(object): Fails with ``FederationDeniedError`` if this destination is not on our federation whitelist """ - - def body_callback(method, url_bytes, headers_dict): - self.sign_request( - destination, method, url_bytes, headers_dict, data - ) - return _JsonProducer(data) - response = yield self._request( destination, "POST", path, - query_bytes=encode_query_args(args), - body_callback=body_callback, - headers_dict={"Content-Type": ["application/json"]}, + query=args, + json=data, long_retries=long_retries, timeout=timeout, ignore_backoff=ignore_backoff, @@ -434,9 +444,9 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield readBody(response) + body = yield treq.json_content(response) - defer.returnValue(json.loads(body)) + defer.returnValue(body) @defer.inlineCallbacks def get_json(self, destination, path, args=None, retry_on_dns_fail=True, @@ -471,16 +481,11 @@ class MatrixFederationHttpClient(object): logger.debug("Query bytes: %s Retry DNS: %s", args, retry_on_dns_fail) - def body_callback(method, url_bytes, headers_dict): - self.sign_request(destination, method, url_bytes, headers_dict) - return None - response = yield self._request( destination, "GET", path, - query_bytes=encode_query_args(args), - body_callback=body_callback, + query=args, retry_on_dns_fail=retry_on_dns_fail, timeout=timeout, ignore_backoff=ignore_backoff, @@ -491,9 +496,9 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield readBody(response) + body = yield treq.json_content(response) - defer.returnValue(json.loads(body)) + defer.returnValue(body) @defer.inlineCallbacks def delete_json(self, destination, path, long_retries=False, @@ -523,13 +528,11 @@ class 
MatrixFederationHttpClient(object): Fails with ``FederationDeniedError`` if this destination is not on our federation whitelist """ - response = yield self._request( destination, "DELETE", path, - query_bytes=encode_query_args(args), - headers_dict={"Content-Type": ["application/json"]}, + query=args, long_retries=long_retries, timeout=timeout, ignore_backoff=ignore_backoff, @@ -540,9 +543,9 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield readBody(response) + body = yield treq.json_content(response) - defer.returnValue(json.loads(body)) + defer.returnValue(body) @defer.inlineCallbacks def get_file(self, destination, path, output_stream, args={}, @@ -569,26 +572,11 @@ class MatrixFederationHttpClient(object): Fails with ``FederationDeniedError`` if this destination is not on our federation whitelist """ - - encoded_args = {} - for k, vs in args.items(): - if isinstance(vs, string_types): - vs = [vs] - encoded_args[k] = [v.encode("UTF-8") for v in vs] - - query_bytes = urllib.urlencode(encoded_args, True) - logger.debug("Query bytes: %s Retry DNS: %s", query_bytes, retry_on_dns_fail) - - def body_callback(method, url_bytes, headers_dict): - self.sign_request(destination, method, url_bytes, headers_dict) - return None - response = yield self._request( destination, "GET", path, - query_bytes=query_bytes, - body_callback=body_callback, + query=args, retry_on_dns_fail=retry_on_dns_fail, ignore_backoff=ignore_backoff, ) @@ -639,30 +627,6 @@ def _readBodyToFile(response, stream, max_size): return d -class _JsonProducer(object): - """ Used by the twisted http client to create the HTTP body from json - """ - def __init__(self, jsn): - self.reset(jsn) - - def reset(self, jsn): - self.body = encode_canonical_json(jsn) - self.length = len(self.body) - - def startProducing(self, consumer): - consumer.write(self.body) - return defer.succeed(None) - - def pauseProducing(self): - pass - - def stopProducing(self): - pass - - def resumeProducing(self): - pass - - def _flatten_response_never_received(e): if hasattr(e, "reasons"): reasons = ", ".join( @@ -693,7 +657,7 @@ def check_content_type_is_json(headers): "No Content-Type header" ) - c_type = c_type[0] # only the first header + c_type = c_type[0].decode('ascii') # only the first header val, options = cgi.parse_header(c_type) if val != "application/json": raise RuntimeError( @@ -711,6 +675,6 @@ def encode_query_args(args): vs = [vs] encoded_args[k] = [v.encode("UTF-8") for v in vs] - query_bytes = urllib.urlencode(encoded_args, True) + query_bytes = urllib.parse.urlencode(encoded_args, True) - return query_bytes + return query_bytes.encode('utf8') diff --git a/synapse/http/site.py b/synapse/http/site.py index 88ed3714f9..f0828c6542 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -204,14 +204,14 @@ class SynapseRequest(Request): self.start_time = time.time() self.request_metrics = RequestMetrics() self.request_metrics.start( - self.start_time, name=servlet_name, method=self.method, + self.start_time, name=servlet_name, method=self.method.decode('ascii'), ) self.site.access_logger.info( "%s - %s - Received request: %s %s", self.getClientIP(), self.site.site_tag, - self.method, + self.method.decode('ascii'), self.get_redacted_uri() ) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 942d7c721f..6dd5179320 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -40,6 +40,7 @@ 
REQUIREMENTS = {
     "pynacl>=1.2.1": ["nacl>=1.2.1", "nacl.bindings"],
     "service_identity>=1.0.0": ["service_identity>=1.0.0"],
     "Twisted>=17.1.0": ["twisted>=17.1.0"],
+    "treq>=15.1": ["treq>=15.1"],
 
     # We use crypto.get_elliptic_curve which is only supported in >=0.15
     "pyopenssl>=0.15": ["OpenSSL>=0.15"],
-- cgit 1.5.1


From 7419764351aa2b2f00f363beaee6ec53c02c492e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 5 Sep 2018 16:19:50 +0100
Subject: Use iter* during sync state calculations

---
 synapse/handlers/sync.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'synapse')

diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index ef20c2296c..0965cf2fc5 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1729,17 +1729,17 @@ def _calculate_state(
     event_id_to_key = {
         e: key
         for key, e in itertools.chain(
-            timeline_contains.items(),
-            previous.items(),
-            timeline_start.items(),
-            current.items(),
+            iteritems(timeline_contains),
+            iteritems(previous),
+            iteritems(timeline_start),
+            iteritems(current),
         )
     }
 
-    c_ids = set(e for e in current.values())
-    ts_ids = set(e for e in timeline_start.values())
-    p_ids = set(e for e in previous.values())
-    tc_ids = set(e for e in timeline_contains.values())
+    c_ids = set(e for e in itervalues(current))
+    ts_ids = set(e for e in itervalues(timeline_start))
+    p_ids = set(e for e in itervalues(previous))
+    tc_ids = set(e for e in itervalues(timeline_contains))
 
     # If we are lazyloading room members, we explicitly add the membership events
     # for the senders in the timeline into the state block returned by /sync,
-- cgit 1.5.1


From 61b05727fa6ba879f507d13ff40c0197916ef795 Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Wed, 5 Sep 2018 22:30:36 +0100
Subject: guest users should not be part of mau total

---
 synapse/storage/monthly_active_users.py    |  7 ++++-
 tests/storage/test_monthly_active_users.py | 44 ++++++++++++++++++++++++++++--
 2 files changed, 48 insertions(+), 3 deletions(-)

(limited to 'synapse')

diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py
index c7899d7fd2..e34e16c079 100644
--- a/synapse/storage/monthly_active_users.py
+++ b/synapse/storage/monthly_active_users.py
@@ -192,14 +192,19 @@ class MonthlyActiveUsersStore(SQLBaseStore):
         ))
 
     @defer.inlineCallbacks
-    def populate_monthly_active_users(self, user_id):
+    def populate_monthly_active_users(self, user_id, is_guest=False):
         """Checks on the state of monthly active user limits and optionally
         add the user to the monthly active tables
 
         Args:
             user_id(str): the user_id to query
         """
+
         if self.hs.config.limit_usage_by_mau:
+            # Guests should not be included as part of MAU group
+            if is_guest:
+                return
+
             is_trial = yield self.is_trial_user(user_id)
             if is_trial:
                 # we don't track trial users in the MAU table.
diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py
index 2036287288..cfe0a7c77d 100644
--- a/tests/storage/test_monthly_active_users.py
+++ b/tests/storage/test_monthly_active_users.py
@@ -12,6 +12,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from mock import Mock + +from twisted.internet import defer from tests.unittest import HomeserverTestCase @@ -23,7 +26,8 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): hs = self.setup_test_homeserver() self.store = hs.get_datastore() - + hs.config.limit_usage_by_mau = True + hs.config.max_mau_value = 50 # Advance the clock a bit reactor.advance(FORTY_DAYS) @@ -73,7 +77,7 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): active_count = self.store.get_monthly_active_count() self.assertEquals(self.get_success(active_count), user_num) - # Test that regalar users are removed from the db + # Test that regular users are removed from the db ru_count = 2 self.store.upsert_monthly_active_user("@ru1:server") self.store.upsert_monthly_active_user("@ru2:server") @@ -139,3 +143,39 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): count = self.store.get_monthly_active_count() self.assertEquals(self.get_success(count), 0) + + def test_populate_monthly_users_is_guest(self): + # Test that guest users are not added to mau list + self.store.upsert_monthly_active_user = Mock() + self.store.populate_monthly_active_users('user_id', True) + self.pump() + self.store.upsert_monthly_active_user.assert_not_called() + + def test_populate_monthly_users_should_update(self): + self.store.upsert_monthly_active_user = Mock() + + self.store.is_trial_user = Mock( + return_value=defer.succeed(False) + ) + + self.store.user_last_seen_monthly_active = Mock( + return_value=defer.succeed(None) + ) + self.store.populate_monthly_active_users('user_id') + self.pump() + self.store.upsert_monthly_active_user.assert_called_once() + + def test_populate_monthly_users_should_not_update(self): + self.store.upsert_monthly_active_user = Mock() + + self.store.is_trial_user = Mock( + return_value=defer.succeed(False) + ) + self.store.user_last_seen_monthly_active = Mock( + return_value=defer.succeed( + self.hs.get_clock().time_msec() + ) + ) + self.store.populate_monthly_active_users('user_id') + self.pump() + self.store.upsert_monthly_active_user.assert_not_called() -- cgit 1.5.1 From 80189ed27c5c1b390cc9783b2931e71cccdb3c74 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 5 Sep 2018 17:03:49 +0100 Subject: prepare v0.33.3.1 --- CHANGES.md | 14 ++++++++++++++ changelog.d/3802.misc | 1 - synapse/__init__.py | 2 +- 3 files changed, 15 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/3802.misc (limited to 'synapse') diff --git a/CHANGES.md b/CHANGES.md index a35f5aebc7..59feb6b1a7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,17 @@ +Synapse 0.33.3.1 (2018-09-06) +============================= + +SECURITY FIXES +-------------- + +- Fix an issue where event signatures were not always correctly validated ([\#3796](https://github.com/matrix-org/synapse/issues/3796)) +- Fix an issue where server_acls could be circumvented for incoming events ([\#3796](https://github.com/matrix-org/synapse/issues/3796)) + +Internal Changes +---------------- + +- Unignore synctl in .dockerignore to fix docker builds ([\#3802](https://github.com/matrix-org/synapse/issues/3802)) + Synapse 0.33.3 (2018-08-22) =========================== diff --git a/changelog.d/3802.misc b/changelog.d/3802.misc deleted file mode 100644 index a00eccb283..0000000000 --- a/changelog.d/3802.misc +++ /dev/null @@ -1 +0,0 @@ -Unignore synctl in .dockerignore to fix docker builds diff --git a/synapse/__init__.py b/synapse/__init__.py index e62901b761..e395a0a9ee 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -17,4 +17,4 @@ 
""" This is a reference implementation of a Matrix home server. """ -__version__ = "0.33.3" +__version__ = "0.33.3.1" -- cgit 1.5.1 From 625542878de2049a7a0b8381a75a77d09c931a82 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 6 Sep 2018 12:53:15 +0100 Subject: bump dep on pyopenssl to 16.x --- synapse/python_dependencies.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 6dd5179320..0d8de600cf 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -42,8 +42,8 @@ REQUIREMENTS = { "Twisted>=17.1.0": ["twisted>=17.1.0"], "treq>=15.1": ["treq>=15.1"], - # We use crypto.get_elliptic_curve which is only supported in >=0.15 - "pyopenssl>=0.15": ["OpenSSL>=0.15"], + # Twisted has required pyopenssl 16.0 since about Twisted 16.6. + "pyopenssl>=16.0.0": ["OpenSSL>=16.0.0"], "pyyaml": ["yaml"], "pyasn1": ["pyasn1"], -- cgit 1.5.1 From 417e7077aadb9f9832dabb0fa93e39e58df92a8a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 Sep 2018 14:08:55 +0100 Subject: Bump version and changelog --- CHANGES.md | 6 ++++++ synapse/__init__.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/CHANGES.md b/CHANGES.md index 4db47da3e3..fecfa7feec 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 0.33.4rc2 (2018-09-06) +============================== + +Pull in security fixes from v0.33.3.1 + + Synapse 0.33.3.1 (2018-09-06) ============================= diff --git a/synapse/__init__.py b/synapse/__init__.py index 2c42893673..063c304534 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -17,4 +17,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.33.4rc1" +__version__ = "0.33.4rc2" -- cgit 1.5.1 From 2608ebc04c8b7ffb3417eeb720801d70ddc7dccd Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 7 Sep 2018 00:22:23 +1000 Subject: Port handlers/ to Python 3 (#3803) --- .gitignore | 1 + changelog.d/3803.misc | 1 + synapse/handlers/auth.py | 8 +++++--- synapse/handlers/e2e_keys.py | 5 +++-- synapse/handlers/federation.py | 6 +++--- synapse/handlers/room_list.py | 2 +- synapse/handlers/search.py | 14 +++++++------- synapse/handlers/sync.py | 6 +++--- 8 files changed, 24 insertions(+), 19 deletions(-) create mode 100644 changelog.d/3803.misc (limited to 'synapse') diff --git a/.gitignore b/.gitignore index 9f42a7568f..1718185384 100644 --- a/.gitignore +++ b/.gitignore @@ -44,6 +44,7 @@ media_store/ build/ venv/ venv*/ +*venv/ localhost-800*/ static/client/register/register_config.js diff --git a/changelog.d/3803.misc b/changelog.d/3803.misc new file mode 100644 index 0000000000..2b60653c29 --- /dev/null +++ b/changelog.d/3803.misc @@ -0,0 +1 @@ +handlers/ is now ported to Python 3. diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 4a81bd2ba9..2a5eab124f 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -895,22 +895,24 @@ class AuthHandler(BaseHandler): Args: password (unicode): Password to hash. - stored_hash (unicode): Expected hash value. + stored_hash (bytes): Expected hash value. Returns: Deferred(bool): Whether self.hash(password) == stored_hash. 
""" - def _do_validate_hash(): # Normalise the Unicode in the password pw = unicodedata.normalize("NFKC", password) return bcrypt.checkpw( pw.encode('utf8') + self.hs.config.password_pepper.encode("utf8"), - stored_hash.encode('utf8') + stored_hash ) if stored_hash: + if not isinstance(stored_hash, bytes): + stored_hash = stored_hash.encode('ascii') + return make_deferred_yieldable( threads.deferToThreadPool( self.hs.get_reactor(), diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 5816bf8b4f..578e9250fb 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -330,7 +330,8 @@ class E2eKeysHandler(object): (algorithm, key_id, ex_json, key) ) else: - new_keys.append((algorithm, key_id, encode_canonical_json(key))) + new_keys.append(( + algorithm, key_id, encode_canonical_json(key).decode('ascii'))) yield self.store.add_e2e_one_time_keys( user_id, device_id, time_now, new_keys @@ -358,7 +359,7 @@ def _exception_to_failure(e): # Note that some Exceptions (notably twisted's ResponseFailed etc) don't # give a string for e.message, which json then fails to serialize. return { - "status": 503, "message": str(e.message), + "status": 503, "message": str(e), } diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 3fa7a98445..0c68e8a472 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -594,7 +594,7 @@ class FederationHandler(BaseHandler): required_auth = set( a_id - for event in events + state_events.values() + auth_events.values() + for event in events + list(state_events.values()) + list(auth_events.values()) for a_id, _ in event.auth_events ) auth_events.update({ @@ -802,7 +802,7 @@ class FederationHandler(BaseHandler): ) continue except NotRetryingDestination as e: - logger.info(e.message) + logger.info(str(e)) continue except FederationDeniedError as e: logger.info(e) @@ -1358,7 +1358,7 @@ class FederationHandler(BaseHandler): ) if state_groups: - _, state = state_groups.items().pop() + _, state = list(state_groups.items()).pop() results = state if event.is_state(): diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 37e41afd61..38e1737ec9 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -162,7 +162,7 @@ class RoomListHandler(BaseHandler): # Filter out rooms that we don't want to return rooms_to_scan = [ r for r in sorted_rooms - if r not in newly_unpublished and rooms_to_num_joined[room_id] > 0 + if r not in newly_unpublished and rooms_to_num_joined[r] > 0 ] total_room_count = len(rooms_to_scan) diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index c464adbd0b..0c1d52fd11 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -54,7 +54,7 @@ class SearchHandler(BaseHandler): batch_token = None if batch: try: - b = decode_base64(batch) + b = decode_base64(batch).decode('ascii') batch_group, batch_group_key, batch_token = b.split("\n") assert batch_group is not None @@ -258,18 +258,18 @@ class SearchHandler(BaseHandler): # it returns more from the same group (if applicable) rather # than reverting to searching all results again. 
            if batch_group and batch_group_key:
-                global_next_batch = encode_base64("%s\n%s\n%s" % (
+                global_next_batch = encode_base64(("%s\n%s\n%s" % (
                     batch_group, batch_group_key, pagination_token
-                ))
+                )).encode('ascii'))
             else:
-                global_next_batch = encode_base64("%s\n%s\n%s" % (
+                global_next_batch = encode_base64(("%s\n%s\n%s" % (
                     "all", "", pagination_token
-                ))
+                )).encode('ascii'))
 
             for room_id, group in room_groups.items():
-                group["next_batch"] = encode_base64("%s\n%s\n%s" % (
+                group["next_batch"] = encode_base64(("%s\n%s\n%s" % (
                     "room_id", room_id, pagination_token
-                ))
+                )).encode('ascii'))
 
         allowed_events.extend(room_events)
 
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index ef20c2296c..0091ceb80e 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -545,7 +545,7 @@ class SyncHandler(object):
 
         member_ids = {
             state_key: event_id
-            for (t, state_key), event_id in state_ids.iteritems()
+            for (t, state_key), event_id in iteritems(state_ids)
             if t == EventTypes.Member
         }
         name_id = state_ids.get((EventTypes.Name, ''))
@@ -774,7 +774,7 @@ class SyncHandler(object):
                 logger.debug("filtering state from %r...", state_ids)
                 state_ids = {
                     t: event_id
-                    for t, event_id in state_ids.iteritems()
+                    for t, event_id in iteritems(state_ids)
                     if cache.get(t[1]) != event_id
                 }
                 logger.debug("...to %r", state_ids)
@@ -1753,7 +1753,7 @@ def _calculate_state(
 
     if lazy_load_members:
         p_ids.difference_update(
-            e for t, e in timeline_start.iteritems()
+            e for t, e in iteritems(timeline_start)
             if t[0] == EventTypes.Member
         )
-- cgit 1.5.1


From 6707a3212cea6849d1c16b70cc1927b16d06814a Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 6 Sep 2018 15:23:55 +0100
Subject: Limit the number of PDUs/EDUs per federation transaction

---
 synapse/federation/transaction_queue.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

(limited to 'synapse')

diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py
index 94d7423d01..8cbf8c4f7f 100644
--- a/synapse/federation/transaction_queue.py
+++ b/synapse/federation/transaction_queue.py
@@ -463,7 +463,19 @@ class TransactionQueue(object):
             # pending_transactions flag.
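# ---------------------------------------------------------------------------
# [Editor's note] The hunk continuing below caps each outgoing federation
# transaction. The pattern, as a standalone sketch: take a bounded prefix of
# the per-destination queue and push the remainder back for the next
# transaction. The 50 PDU / 100 EDU limits match the patch; the queue
# contents here are made up.
def take_batch(queue, limit):
    batch, leftover = queue[:limit], queue[limit:]
    return batch, leftover

pending = ["pdu-%d" % i for i in range(120)]
batch, pending = take_batch(pending, 50)
assert len(batch) == 50 and len(pending) == 70
# ---------------------------------------------------------------------------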
             pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
+
+            # We can only include at most 50 PDUs per transaction
+            pending_pdus, leftover_pdus = pending_pdus[:50], pending_pdus[50:]
+            if leftover_pdus:
+                self.pending_pdus_by_dest[destination] = leftover_pdus
+
             pending_edus = self.pending_edus_by_dest.pop(destination, [])
+
+            # We can only include at most 100 EDUs per transaction
+            pending_edus, leftover_edus = pending_edus[:100], pending_edus[100:]
+            if leftover_edus:
+                self.pending_edus_by_dest[destination] = leftover_edus
+
             pending_presence = self.pending_presence_by_dest.pop(destination, {})
 
             pending_edus.extend(
-- cgit 1.5.1


From 3801b8aa035594972c400c8bd036894a388c4ab3 Mon Sep 17 00:00:00 2001
From: Hubert Chathi
Date: Thu, 6 Sep 2018 11:23:16 -0400
Subject: try to make flake8 and isort happy

---
 synapse/api/errors.py                     | 1 +
 synapse/handlers/e2e_room_keys.py         | 2 +-
 synapse/rest/client/v2_alpha/room_keys.py | 5 ++++-
 synapse/storage/e2e_room_keys.py          | 6 ++++--
 tests/handlers/test_e2e_room_keys.py      | 9 +++++----
 5 files changed, 15 insertions(+), 8 deletions(-)

(limited to 'synapse')

diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 3002c95dd1..140dbfe8b8 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -326,6 +326,7 @@ class RoomKeysVersionError(SynapseError):
         )
         self.current_version = current_version
 
+
 class IncompatibleRoomVersionError(SynapseError):
     """A server is trying to join a room whose version it does not support."""
 
diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py
index c09816b372..2c330382cf 100644
--- a/synapse/handlers/e2e_room_keys.py
+++ b/synapse/handlers/e2e_room_keys.py
@@ -17,7 +17,7 @@ import logging
 
 from twisted.internet import defer
 
-from synapse.api.errors import StoreError, SynapseError, RoomKeysVersionError
+from synapse.api.errors import RoomKeysVersionError, StoreError, SynapseError
 from synapse.util.async_helpers import Linearizer
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py
index 9f0172e6f5..1ed18e986f 100644
--- a/synapse/rest/client/v2_alpha/room_keys.py
+++ b/synapse/rest/client/v2_alpha/room_keys.py
@@ -19,8 +19,11 @@ from twisted.internet import defer
 
 from synapse.api.errors import SynapseError
 from synapse.http.servlet import (
-    RestServlet, parse_json_object_from_request, parse_string
+    RestServlet,
+    parse_json_object_from_request,
+    parse_string,
 )
+
 from ._base import client_v2_patterns
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py
index b695570a7b..969f4aef9c 100644
--- a/synapse/storage/e2e_room_keys.py
+++ b/synapse/storage/e2e_room_keys.py
@@ -13,9 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import simplejson as json + from twisted.internet import defer + from synapse.api.errors import StoreError -import simplejson as json from ._base import SQLBaseStore @@ -58,7 +60,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): desc="get_e2e_room_key", ) - row["session_data"] = json.loads(row["session_data"]); + row["session_data"] = json.loads(row["session_data"]) defer.returnValue(row) diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index 7fa4264441..9e08eac0a5 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -14,17 +14,18 @@ # See the License for the specific language governing permissions and # limitations under the License. +import copy + import mock -from synapse.api import errors + from twisted.internet import defer -import copy import synapse.api.errors import synapse.handlers.e2e_room_keys - import synapse.storage -from tests import unittest, utils +from synapse.api import errors +from tests import unittest, utils # sample room_key data for use in the tests room_keys = { -- cgit 1.5.1 From 7baf66ef5d1575b40b638d4f21204e1646792612 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 Sep 2018 16:46:51 +0100 Subject: Send existing room tags down sync on join When a user joined a room any existing tags were not sent down the sync stream. Ordinarily this isn't a problem because the user needs to be in the room to have set tags in it, however synapse will sometimes add tags for a user to a room, e.g. for server notices, which need to come down sync. --- synapse/handlers/sync.py | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'synapse') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 9f133ded3f..150d492cb0 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1575,6 +1575,14 @@ class SyncHandler(object): newly_joined_room=newly_joined, ) + # When we join the room (or the client requests full_state), we should + # send down any existing tags. Usually the user won't have tags in a + # newly joined room, unless either a) they've joined before or b) the + # tag was added by synapse e.g. for server notice rooms. 
+ if full_state: + user_id = sync_result_builder.sync_config.user.to_string() + tags = yield self.store.get_tags_for_room(user_id, room_id) + account_data_events = [] if tags is not None: account_data_events.append({ -- cgit 1.5.1 From f60c9e2a01598c1900ab785096aa379d1fb6ac5b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 Sep 2018 17:01:41 +0100 Subject: Don't send empty tags list down sync --- synapse/handlers/sync.py | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'synapse') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 150d492cb0..7eed2fcc9b 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1583,6 +1583,11 @@ class SyncHandler(object): user_id = sync_result_builder.sync_config.user.to_string() tags = yield self.store.get_tags_for_room(user_id, room_id) + # If there aren't any tags, don't send the empty tags list down + # sync + if not tags: + tags = None + account_data_events = [] if tags is not None: account_data_events.append({ -- cgit 1.5.1 From 84a750e0c36f653b71a06c564154f257c1b7dee3 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 6 Sep 2018 17:22:53 +0100 Subject: ensure guests never enter mau list --- synapse/storage/monthly_active_users.py | 7 +++---- tests/storage/test_client_ips.py | 4 +--- tests/storage/test_monthly_active_users.py | 6 +++++- tests/utils.py | 1 + 4 files changed, 10 insertions(+), 8 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index e34e16c079..b890c152db 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -192,7 +192,7 @@ class MonthlyActiveUsersStore(SQLBaseStore): )) @defer.inlineCallbacks - def populate_monthly_active_users(self, user_id, is_guest=False): + def populate_monthly_active_users(self, user_id): """Checks on the state of monthly active user limits and optionally add the user to the monthly active tables @@ -201,13 +201,12 @@ class MonthlyActiveUsersStore(SQLBaseStore): """ if self.hs.config.limit_usage_by_mau: - # Guests should not be included as part of MAU group + # Trial users and guests should not be included as part of MAU group + is_guest = yield self.is_guest(user_id) if is_guest: return - is_trial = yield self.is_trial_user(user_id) if is_trial: - # we don't track trial users in the MAU table. 
return last_seen_timestamp = yield self.user_last_seen_monthly_active(user_id) diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index c2e88bdbaf..c9b02a062b 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -101,13 +101,11 @@ class ClientIpStoreTestCase(tests.unittest.TestCase): self.hs.config.limit_usage_by_mau = True self.hs.config.max_mau_value = 50 user_id = "@user:server" + yield self.store.register(user_id=user_id, token="123", password_hash=None) active = yield self.store.user_last_seen_monthly_active(user_id) self.assertFalse(active) - yield self.store.insert_client_ip( - user_id, "access_token", "ip", "user_agent", "device_id" - ) yield self.store.insert_client_ip( user_id, "access_token", "ip", "user_agent", "device_id" ) diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index cfe0a7c77d..ccfc21b7fc 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -146,8 +146,12 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): def test_populate_monthly_users_is_guest(self): # Test that guest users are not added to mau list + user_id = "user_id" + self.store.register( + user_id=user_id, token="123", password_hash=None, make_guest=True + ) self.store.upsert_monthly_active_user = Mock() - self.store.populate_monthly_active_users('user_id', True) + self.store.populate_monthly_active_users(user_id) self.pump() self.store.upsert_monthly_active_user.assert_not_called() diff --git a/tests/utils.py b/tests/utils.py index 114470d641..f24bd81bf6 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -147,6 +147,7 @@ def setup_test_homeserver( config.hs_disabled_message = "" config.hs_disabled_limit_type = "" config.max_mau_value = 50 + config.mau_trial_days = 0 config.mau_limits_reserved_threepids = [] config.admin_contact = None config.rc_messages_per_second = 10000 -- cgit 1.5.1 From 806964b5de3e5036a897d4b5d477e7616779d019 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 6 Sep 2018 18:51:06 +0100 Subject: add some logging for the keyring queue why is it so damn slow? --- synapse/crypto/keyring.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) (limited to 'synapse') diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 30e2742102..a6b31e8b5b 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -40,6 +40,7 @@ from synapse.api.errors import Codes, SynapseError from synapse.crypto.keyclient import fetch_server_key from synapse.util import logcontext, unwrapFirstError from synapse.util.logcontext import ( + LoggingContext, PreserveLoggingContext, preserve_fn, run_in_background, @@ -216,23 +217,34 @@ class Keyring(object): servers have completed. Follows the synapse rules of logcontext preservation. 
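# ---------------------------------------------------------------------------
# [Editor's note] A standalone sketch of the per-server "lock" the keyring
# hunk below instruments: key_downloads maps server_name -> Deferred, and the
# entry is popped (now with a debug log) when the lookup completes, whether
# it succeeded or failed. The class and names are illustrative, not synapse's.
from twisted.internet import defer

class LookupLocks(object):
    def __init__(self):
        self.key_downloads = {}  # server_name -> Deferred

    def acquire(self, server_name, deferred):
        self.key_downloads[server_name] = deferred

        def release(result):
            # addBoth runs on success *and* failure, passing result through
            self.key_downloads.pop(server_name, None)
            return result

        deferred.addBoth(release)

d = defer.Deferred()
locks = LookupLocks()
locks.acquire("example.org", d)
d.callback("keys")  # the lock is released as the result passes through
assert "example.org" not in locks.key_downloads
# ---------------------------------------------------------------------------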
""" + loop_count = 1 while True: wait_on = [ - self.key_downloads[server_name] + (server_name, self.key_downloads[server_name]) for server_name in server_names if server_name in self.key_downloads ] - if wait_on: - with PreserveLoggingContext(): - yield defer.DeferredList(wait_on) - else: + if not wait_on: break + logger.info( + "Waiting for existing lookups for %s to complete [loop %i]", + [w[0] for w in wait_on], loop_count, + ) + with PreserveLoggingContext(): + yield defer.DeferredList((w[1] for w in wait_on)) + + loop_count += 1 + + ctx = LoggingContext.current_context() def rm(r, server_name_): - self.key_downloads.pop(server_name_, None) + with PreserveLoggingContext(ctx): + logger.debug("Releasing key lookup lock on %s", server_name_) + self.key_downloads.pop(server_name_, None) return r for server_name, deferred in server_to_deferred.items(): + logger.debug("Got key lookup lock on %s", server_name) self.key_downloads[server_name] = deferred deferred.addBoth(rm, server_name) -- cgit 1.5.1 From cd7ef4387205eaeb64b77860a82f782f91e41e93 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 6 Sep 2018 23:56:47 +0100 Subject: clearer logging when things fail, too --- synapse/federation/federation_base.py | 34 +++++++++++++++++++++++++++------- synapse/storage/keys.py | 1 + 2 files changed, 28 insertions(+), 7 deletions(-) (limited to 'synapse') diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 5be8e66fb8..61782ae1c0 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -143,11 +143,31 @@ class FederationBase(object): def callback(_, pdu): with logcontext.PreserveLoggingContext(ctx): if not check_event_content_hash(pdu): - logger.warn( - "Event content has been tampered, redacting %s: %s", - pdu.event_id, pdu.get_pdu_json() - ) - return prune_event(pdu) + # let's try to distinguish between failures because the event was + # redacted (which are somewhat expected) vs actual ball-tampering + # incidents. + # + # This is just a heuristic, so we just assume that if the keys are + # about the same between the redacted and received events, then the + # received event was probably a redacted copy (but we then use our + # *actual* redacted copy to be on the safe side.) 
+                redacted_event = prune_event(pdu)
+                if (
+                    set(six.iterkeys(redacted_event)) == set(six.iterkeys(pdu)) and
+                    set(six.iterkeys(redacted_event.content))
+                        == set(six.iterkeys(pdu.content))
+                ):
+                    logger.info(
+                        "Event %s seems to have been redacted; using our redacted "
+                        "copy",
+                        pdu.event_id,
+                    )
+                else:
+                    logger.warning(
+                        "Event %s content has been tampered, redacting: %s",
+                        pdu.event_id, pdu.get_pdu_json(),
+                    )
+                return redacted_event
 
             if self.spam_checker.check_event_for_spam(pdu):
                 logger.warn(
@@ -162,8 +182,8 @@ class FederationBase(object):
             failure.trap(SynapseError)
             with logcontext.PreserveLoggingContext(ctx):
                 logger.warn(
-                    "Signature check failed for %s",
-                    pdu.event_id,
+                    "Signature check failed for %s: %s",
+                    pdu.event_id, failure.getErrorMessage(),
                 )
             return failure
 
diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py
index f547977600..a1331c1a61 100644
--- a/synapse/storage/keys.py
+++ b/synapse/storage/keys.py
@@ -134,6 +134,7 @@ class KeyStore(SQLBaseStore):
         """
         key_id = "%s:%s" % (verify_key.alg, verify_key.version)
 
+        # XXX fix this to not need a lock (#3819)
         def _txn(txn):
             self._simple_upsert_txn(
                 txn,
-- cgit 1.5.1


From 6febd8e8f737d28a9fec6cd2bfef0e80bea8437c Mon Sep 17 00:00:00 2001
From: Amber Brown
Date: Fri, 7 Sep 2018 21:40:57 +1000
Subject: version

---
 synapse/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'synapse')

diff --git a/synapse/__init__.py b/synapse/__init__.py
index 063c304534..65a2b894cc 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -17,4 +17,4 @@
 """ This is a reference implementation of a Matrix home server.
 """
 
-__version__ = "0.33.4rc2"
+__version__ = "0.33.4"
-- cgit 1.5.1


From b8ad756bd0d7c42c7c241fa08f5e078561fddded Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Fri, 7 Sep 2018 14:20:54 +0100
Subject: Fix jwt import check

This handy code attempted to check that we could import jwt, but utterly
failed to check it was the right jwt.

Fixes https://github.com/matrix-org/synapse/issues/3793
---
 synapse/config/homeserver.py |  2 +-
 synapse/config/jwt.py        | 53 --------------------------------------------
 synapse/config/jwt_config.py | 53 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 54 insertions(+), 54 deletions(-)
 delete mode 100644 synapse/config/jwt.py
 create mode 100644 synapse/config/jwt_config.py

(limited to 'synapse')

diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
index 2fd9c48abf..b8d5690f2b 100644
--- a/synapse/config/homeserver.py
+++ b/synapse/config/homeserver.py
@@ -21,7 +21,7 @@ from .consent_config import ConsentConfig
 from .database import DatabaseConfig
 from .emailconfig import EmailConfig
 from .groups import GroupsConfig
-from .jwt import JWTConfig
+from .jwt_config import JWTConfig
 from .key import KeyConfig
 from .logger import LoggingConfig
 from .metrics import MetricsConfig
diff --git a/synapse/config/jwt.py b/synapse/config/jwt.py
deleted file mode 100644
index 51e7f7e003..0000000000
--- a/synapse/config/jwt.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2015 Niklas Riekenbrauck
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ._base import Config, ConfigError - -MISSING_JWT = ( - """Missing jwt library. This is required for jwt login. - - Install by running: - pip install pyjwt - """ -) - - -class JWTConfig(Config): - def read_config(self, config): - jwt_config = config.get("jwt_config", None) - if jwt_config: - self.jwt_enabled = jwt_config.get("enabled", False) - self.jwt_secret = jwt_config["secret"] - self.jwt_algorithm = jwt_config["algorithm"] - - try: - import jwt - jwt # To stop unused lint. - except ImportError: - raise ConfigError(MISSING_JWT) - else: - self.jwt_enabled = False - self.jwt_secret = None - self.jwt_algorithm = None - - def default_config(self, **kwargs): - return """\ - # The JWT needs to contain a globally unique "sub" (subject) claim. - # - # jwt_config: - # enabled: true - # secret: "a secret" - # algorithm: "HS256" - """ diff --git a/synapse/config/jwt_config.py b/synapse/config/jwt_config.py new file mode 100644 index 0000000000..51e7f7e003 --- /dev/null +++ b/synapse/config/jwt_config.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2015 Niklas Riekenbrauck +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ._base import Config, ConfigError + +MISSING_JWT = ( + """Missing jwt library. This is required for jwt login. + + Install by running: + pip install pyjwt + """ +) + + +class JWTConfig(Config): + def read_config(self, config): + jwt_config = config.get("jwt_config", None) + if jwt_config: + self.jwt_enabled = jwt_config.get("enabled", False) + self.jwt_secret = jwt_config["secret"] + self.jwt_algorithm = jwt_config["algorithm"] + + try: + import jwt + jwt # To stop unused lint. + except ImportError: + raise ConfigError(MISSING_JWT) + else: + self.jwt_enabled = False + self.jwt_secret = None + self.jwt_algorithm = None + + def default_config(self, **kwargs): + return """\ + # The JWT needs to contain a globally unique "sub" (subject) claim. 
+ # + # jwt_config: + # enabled: true + # secret: "a secret" + # algorithm: "HS256" + """ -- cgit 1.5.1 From 7bc22539ffb8375f92a1b26f6b4cdcfcfa9efd9a Mon Sep 17 00:00:00 2001 From: Krombel Date: Mon, 10 Sep 2018 14:30:08 +0200 Subject: fix VOIP crashes under Python 3 (#3821) --- synapse/rest/client/v1/voip.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py index 62f4c3d93e..53da905eea 100644 --- a/synapse/rest/client/v1/voip.py +++ b/synapse/rest/client/v1/voip.py @@ -42,7 +42,11 @@ class VoipRestServlet(ClientV1RestServlet): expiry = (self.hs.get_clock().time_msec() + userLifetime) / 1000 username = "%d:%s" % (expiry, requester.user.to_string()) - mac = hmac.new(turnSecret, msg=username, digestmod=hashlib.sha1) + mac = hmac.new( + turnSecret.encode(), + msg=username.encode(), + digestmod=hashlib.sha1 + ) # We need to use standard padded base64 encoding here # encode_base64 because we need to add the standard padding to get the # same result as the TURN server. -- cgit 1.5.1 From 9e05c8d3090a956054b5b39347359655236caaef Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Sep 2018 10:42:10 +0100 Subject: Change the manhole SSH key to have more bits Newer versions of openssh client refuse to connect to the old key due to its length. --- synapse/util/manhole.py | 44 +++++++++++++++++++++++++++++++------------- 1 file changed, 31 insertions(+), 13 deletions(-) (limited to 'synapse') diff --git a/synapse/util/manhole.py b/synapse/util/manhole.py index 14be3c7396..8d0f2a8918 100644 --- a/synapse/util/manhole.py +++ b/synapse/util/manhole.py @@ -19,22 +19,40 @@ from twisted.conch.ssh.keys import Key from twisted.cred import checkers, portal PUBLIC_KEY = ( - "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEArzJx8OYOnJmzf4tfBEvLi8DVPrJ3/c9k2I/Az" - "64fxjHf9imyRJbixtQhlH9lfNjUIx+4LmrJH5QNRsFporcHDKOTwTTYLh5KmRpslkYHRivcJS" - "kbh/C+BR3utDS555mV" + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHhGATaW4KhE23+7nrH4jFx3yLq9OjaEs5" + "XALqeK+7385NlLja3DE/DO9mGhnd9+bAy39EKT3sTV6+WXQ4yD0TvEEyUEMtjWkSEm6U32+C" + "DaS3TW/vPBUMeJQwq+Ydcif1UlnpXrDDTamD0AU9VaEvHq+3HAkipqn0TGpKON6aqk4vauDx" + "oXSsV5TXBVrxP/y7HpMOpU4GUWsaaacBTKKNnUaQB4UflvydaPJUuwdaCUJGTMjbhWrjVfK+" + "jslseSPxU6XvrkZMyCr4znxvuDxjMk1RGIdO7v+rbBMLEgqtSMNqJbYeVCnj2CFgc3fcTcld" + "X2uOJDrJb/WRlHulthCh" ) PRIVATE_KEY = """-----BEGIN RSA PRIVATE KEY----- -MIIByAIBAAJhAK8ycfDmDpyZs3+LXwRLy4vA1T6yd/3PZNiPwM+uH8Yx3/YpskSW -4sbUIZR/ZXzY1CMfuC5qyR+UDUbBaaK3Bwyjk8E02C4eSpkabJZGB0Yr3CUpG4fw -vgUd7rQ0ueeZlQIBIwJgbh+1VZfr7WftK5lu7MHtqE1S1vPWZQYE3+VUn8yJADyb -Z4fsZaCrzW9lkIqXkE3GIY+ojdhZhkO1gbG0118sIgphwSWKRxK0mvh6ERxKqIt1 -xJEJO74EykXZV4oNJ8sjAjEA3J9r2ZghVhGN6V8DnQrTk24Td0E8hU8AcP0FVP+8 -PQm/g/aXf2QQkQT+omdHVEJrAjEAy0pL0EBH6EVS98evDCBtQw22OZT52qXlAwZ2 -gyTriKFVoqjeEjt3SZKKqXHSApP/AjBLpF99zcJJZRq2abgYlf9lv1chkrWqDHUu -DZttmYJeEfiFBBavVYIF1dOlZT0G8jMCMBc7sOSZodFnAiryP+Qg9otSBjJ3bQML -pSTqy7c3a2AScC/YyOwkDaICHnnD3XyjMwIxALRzl0tQEKMXs6hH8ToUdlLROCrP -EhQ0wahUTCk1gKA4uPD6TMTChavbh4K63OvbKg== +MIIEpQIBAAKCAQEAx4RgE2luCoRNt/u56x+Ixcd8i6vTo2hLOVwC6nivu9/OTZS4 +2twxPwzvZhoZ3ffmwMt/RCk97E1evll0OMg9E7xBMlBDLY1pEhJulN9vgg2kt01v +7zwVDHiUMKvmHXIn9VJZ6V6ww02pg9AFPVWhLx6vtxwJIqap9ExqSjjemqpOL2rg +8aF0rFeU1wVa8T/8ux6TDqVOBlFrGmmnAUyijZ1GkAeFH5b8nWjyVLsHWglCRkzI +24Vq41Xyvo7JbHkj8VOl765GTMgq+M58b7g8YzJNURiHTu7/q2wTCxIKrUjDaiW2 +HlQp49ghYHN33E3JXV9rjiQ6yW/1kZR7pbYQoQIDAQABAoIBAQC8KJ0q8Wzzwh5B +esa1dQHZ8+4DEsL/Amae66VcVwD0X3cCN1W2IZ7X5W0Ij2kBqr8V51RYhcR+S+Ek 
+BtzSiBUBvbKGrqcMGKaUgomDIMzai99hd0gvCCyZnEW1OQhFkNkaRNXCfqiZJ27M +fqvSUiU2eOwh9fCvmxoA6Of8o3FbzcJ+1GMcobWRllDtLmj6lgVbDzuA+0jC5daB +9Tj1pBzu3wn3ufxiS+gBnJ+7NcXH3E73lqCcPa2ufbZ1haxfiGCnRIhFXuQDgxFX +vKdEfDgtvas6r1ahGbc+b/q8E8fZT7cABuIU4yfOORK+MhpyWbvoyyzuVGKj3PKt +KSPJu5CZAoGBAOkoJfAVyYteqKcmGTanGqQnAY43CaYf6GdSPX/jg+JmKZg0zqMC +jWZUtPb93i+jnOInbrnuHOiHAxI8wmhEPed28H2lC/LU8PzlqFkZXKFZ4vLOhhRB +/HeHCFIDosPFlohWi3b+GAjD7sXgnIuGmnXWe2ea/TS3yersifDEoKKjAoGBANsQ +gJX2cJv1c3jhdgcs8vAt5zIOKcCLTOr/QPmVf/kxjNgndswcKHwsxE/voTO9q+TF +v/6yCSTxAdjuKz1oIYWgi/dZo82bBKWxNRpgrGviU3/zwxiHlyIXUhzQu78q3VS/ +7S1XVbc7qMV++XkYKHPVD+nVG/gGzFxumX7MLXfrAoGBAJit9cn2OnjNj9uFE1W6 +r7N254ndeLAUjPe73xH0RtTm2a4WRopwjW/JYIetTuYbWgyujc+robqTTuuOZjAp +H/CG7o0Ym251CypQqaFO/l2aowclPp/dZhpPjp9GSjuxFBZLtiBB3DNBOwbRQzIK +/vLTdRQvZkgzYkI4i0vjNt3JAoGBANP8HSKBLymMlShlrSx2b8TB9tc2Y2riohVJ +2ttqs0M2kt/dGJWdrgOz4mikL+983Olt/0P9juHDoxEEMK2kpcPEv40lnmBpYU7h +s8yJvnBLvJe2EJYdJ8AipyAhUX1FgpbvfxmASP8eaUxsegeXvBWTGWojAoS6N2o+ +0KSl+l3vAoGAFqm0gO9f/Q1Se60YQd4l2PZeMnJFv0slpgHHUwegmd6wJhOD7zJ1 +CkZcXwiv7Nog7AI9qKJEUXLjoqL+vJskBzSOqU3tcd670YQMi1aXSXJqYE202K7o +EddTrx3TNpr1D5m/f+6mnXWrc8u9y1+GNx9yz889xMjIBTBI9KqaaOs= -----END RSA PRIVATE KEY-----""" -- cgit 1.5.1 From b041115415191f8353177f7e0c1beec81be0c921 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 12 Sep 2018 00:50:39 +0100 Subject: Speed up lazy loading (#3827) * speed up room summaries by pulling their data from room_memberships rather than room state * disable LL for incr syncs, and log incr sync stats (#3840) --- changelog.d/3827.misc | 1 + changelog.d/3840.misc | 1 + synapse/handlers/sync.py | 118 +++++++++++++++++++++++++++++++----------- synapse/storage/events.py | 4 ++ synapse/storage/roommember.py | 65 +++++++++++++++++++++++ 5 files changed, 159 insertions(+), 30 deletions(-) create mode 100644 changelog.d/3827.misc create mode 100644 changelog.d/3840.misc (limited to 'synapse') diff --git a/changelog.d/3827.misc b/changelog.d/3827.misc new file mode 100644 index 0000000000..bc294706cf --- /dev/null +++ b/changelog.d/3827.misc @@ -0,0 +1 @@ +speed up lazy loading by 2-3x diff --git a/changelog.d/3840.misc b/changelog.d/3840.misc new file mode 100644 index 0000000000..b9585ae9be --- /dev/null +++ b/changelog.d/3840.misc @@ -0,0 +1 @@ +Disable lazy loading for incremental syncs for now diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 7eed2fcc9b..23983a51ab 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -24,6 +24,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.push.clientformat import format_push_rules_for_user +from synapse.storage.roommember import MemberSummary from synapse.types import RoomStreamToken from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache @@ -525,6 +526,8 @@ class SyncHandler(object): A deferred dict describing the room summary """ + # FIXME: we could/should get this from room_stats when matthew/stats lands + # FIXME: this promulgates https://github.com/matrix-org/synapse/issues/3305 last_events, _ = yield self.store.get_recent_event_ids_for_room( room_id, end_token=now_token.room_key, limit=1, @@ -537,44 +540,54 @@ class SyncHandler(object): last_event = last_events[-1] state_ids = yield self.store.get_state_ids_for_event( last_event.event_id, [ - (EventTypes.Member, None), (EventTypes.Name, ''), (EventTypes.CanonicalAlias, ''), ] ) - member_ids = { - state_key: event_id - for 
(t, state_key), event_id in iteritems(state_ids) - if t == EventTypes.Member - } + # this is heavily cached, thus: fast. + details = yield self.store.get_room_summary(room_id) + name_id = state_ids.get((EventTypes.Name, '')) canonical_alias_id = state_ids.get((EventTypes.CanonicalAlias, '')) summary = {} - - # FIXME: it feels very heavy to load up every single membership event - # just to calculate the counts. - member_events = yield self.store.get_events(member_ids.values()) - - joined_user_ids = [] - invited_user_ids = [] - - for ev in member_events.values(): - if ev.content.get("membership") == Membership.JOIN: - joined_user_ids.append(ev.state_key) - elif ev.content.get("membership") == Membership.INVITE: - invited_user_ids.append(ev.state_key) + empty_ms = MemberSummary([], 0) # TODO: only send these when they change. - summary["m.joined_member_count"] = len(joined_user_ids) - summary["m.invited_member_count"] = len(invited_user_ids) + summary["m.joined_member_count"] = ( + details.get(Membership.JOIN, empty_ms).count + ) + summary["m.invited_member_count"] = ( + details.get(Membership.INVITE, empty_ms).count + ) if name_id or canonical_alias_id: defer.returnValue(summary) - # FIXME: order by stream ordering, not alphabetic + joined_user_ids = [ + r[0] for r in details.get(Membership.JOIN, empty_ms).members + ] + invited_user_ids = [ + r[0] for r in details.get(Membership.INVITE, empty_ms).members + ] + gone_user_ids = ( + [r[0] for r in details.get(Membership.LEAVE, empty_ms).members] + + [r[0] for r in details.get(Membership.BAN, empty_ms).members] + ) + + # FIXME: only build up a member_ids list for our heroes + member_ids = {} + for membership in ( + Membership.JOIN, + Membership.INVITE, + Membership.LEAVE, + Membership.BAN + ): + for user_id, event_id in details.get(membership, empty_ms).members: + member_ids[user_id] = event_id + # FIXME: order by stream ordering rather than as returned by SQL me = sync_config.user.to_string() if (joined_user_ids or invited_user_ids): summary['m.heroes'] = sorted( @@ -586,7 +599,11 @@ class SyncHandler(object): )[0:5] else: summary['m.heroes'] = sorted( - [user_id for user_id in member_ids.keys() if user_id != me] + [ + user_id + for user_id in gone_user_ids + if user_id != me + ] )[0:5] if not sync_config.filter_collection.lazy_load_members(): @@ -719,6 +736,26 @@ class SyncHandler(object): lazy_load_members=lazy_load_members, ) elif batch.limited: + state_at_timeline_start = yield self.store.get_state_ids_for_event( + batch.events[0].event_id, types=types, + filtered_types=filtered_types, + ) + + # for now, we disable LL for gappy syncs - see + # https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346 + # N.B. this slows down incr syncs as we are now processing way + # more state in the server than if we were LLing. + # + # We still have to filter timeline_start to LL entries (above) in order + # for _calculate_state's LL logic to work, as we have to include LL + # members for timeline senders in case they weren't loaded in the initial + # sync. We do this by (counterintuitively) by filtering timeline_start + # members to just be ones which were timeline senders, which then ensures + # all of the rest get included in the state block (if we need to know + # about them). 
+ types = None + filtered_types = None + state_at_previous_sync = yield self.get_state_at( room_id, stream_position=since_token, types=types, filtered_types=filtered_types, @@ -729,24 +766,21 @@ class SyncHandler(object): filtered_types=filtered_types, ) - state_at_timeline_start = yield self.store.get_state_ids_for_event( - batch.events[0].event_id, types=types, - filtered_types=filtered_types, - ) - state_ids = _calculate_state( timeline_contains=timeline_state, timeline_start=state_at_timeline_start, previous=state_at_previous_sync, current=current_state_ids, + # we have to include LL members in case LL initial sync missed them lazy_load_members=lazy_load_members, ) else: state_ids = {} if lazy_load_members: if types: - # We're returning an incremental sync, with no "gap" since - # the previous sync, so normally there would be no state to return + # We're returning an incremental sync, with no + # "gap" since the previous sync, so normally there would be + # no state to return. # But we're lazy-loading, so the client might need some more # member events to understand the events in this timeline. # So we fish out all the member events corresponding to the @@ -1616,10 +1650,24 @@ class SyncHandler(object): ) summary = {} + + # we include a summary in room responses when we're lazy loading + # members (as the client otherwise doesn't have enough info to form + # the name itself). if ( sync_config.filter_collection.lazy_load_members() and ( + # we recalulate the summary: + # if there are membership changes in the timeline, or + # if membership has changed during a gappy sync, or + # if this is an initial sync. any(ev.type == EventTypes.Member for ev in batch.events) or + ( + # XXX: this may include false positives in the form of LL + # members which have snuck into state + batch.limited and + any(t == EventTypes.Member for (t, k) in state) + ) or since_token is None ) ): @@ -1649,6 +1697,16 @@ class SyncHandler(object): unread_notifications["highlight_count"] = notifs["highlight_count"] sync_result_builder.joined.append(room_sync) + + if batch.limited: + user_id = sync_result_builder.sync_config.user.to_string() + logger.info( + "Incremental syncing room %s for user %s with %d state events" % ( + room_id, + user_id, + len(state), + ) + ) elif room_builder.rtype == "archived": room_sync = ArchivedSyncResult( room_id=room_id, diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 8bf87f38f7..30ff87a4c4 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -929,6 +929,10 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore txn, self.get_users_in_room, (room_id,) ) + self._invalidate_cache_and_stream( + txn, self.get_room_summary, (room_id,) + ) + self._invalidate_cache_and_stream( txn, self.get_current_state_ids, (room_id,) ) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 9b4e6d6aa8..0707f9a86a 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -51,6 +51,12 @@ ProfileInfo = namedtuple( "ProfileInfo", ("avatar_url", "display_name") ) +# "members" points to a truncated list of (user_id, event_id) tuples for users of +# a given membership type, suitable for use in calculating heroes for a room. +# "count" points to the total numberr of users of a given membership type. 
+MemberSummary = namedtuple( + "MemberSummary", ("members", "count") +) _MEMBERSHIP_PROFILE_UPDATE_NAME = "room_membership_profile_update" @@ -82,6 +88,65 @@ class RoomMemberWorkerStore(EventsWorkerStore): return [to_ascii(r[0]) for r in txn] return self.runInteraction("get_users_in_room", f) + @cached(max_entries=100000) + def get_room_summary(self, room_id): + """ Get the details of a room roughly suitable for use by the room + summary extension to /sync. Useful when lazy loading room members. + Args: + room_id (str): The room ID to query + Returns: + Deferred[dict[str, MemberSummary]: + dict of membership states, pointing to a MemberSummary named tuple. + """ + + def _get_room_summary_txn(txn): + # first get counts. + # We do this all in one transaction to keep the cache small. + # FIXME: get rid of this when we have room_stats + sql = """ + SELECT count(*), m.membership FROM room_memberships as m + INNER JOIN current_state_events as c + ON m.event_id = c.event_id + AND m.room_id = c.room_id + AND m.user_id = c.state_key + WHERE c.type = 'm.room.member' AND c.room_id = ? + GROUP BY m.membership + """ + + txn.execute(sql, (room_id,)) + res = {} + for count, membership in txn: + summary = res.setdefault(to_ascii(membership), MemberSummary([], count)) + + # we order by membership and then fairly arbitrarily by event_id so + # heroes are consistent + sql = """ + SELECT m.user_id, m.membership, m.event_id + FROM room_memberships as m + INNER JOIN current_state_events as c + ON m.event_id = c.event_id + AND m.room_id = c.room_id + AND m.user_id = c.state_key + WHERE c.type = 'm.room.member' AND c.room_id = ? + ORDER BY + CASE m.membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC, + m.event_id ASC + LIMIT ? + """ + + # 6 is 5 (number of heroes) plus 1, in case one of them is the calling user. + txn.execute(sql, (room_id, Membership.JOIN, Membership.INVITE, 6)) + for user_id, membership, event_id in txn: + summary = res[to_ascii(membership)] + # we will always have a summary for this membership type at this + # point given the summary currently contains the counts. + members = summary.members + members.append((to_ascii(user_id), to_ascii(event_id))) + + return res + + return self.runInteraction("get_room_summary", _get_room_summary_txn) + @cached() def get_invited_rooms_for_user(self, user_id): """ Get all the rooms the user is invited to -- cgit 1.5.1 From 4084a774a89f0d02406eebda8279c2b8aab89812 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 12 Sep 2018 09:59:15 +0100 Subject: Timeout reading body for outbound HTTP requests --- synapse/http/matrixfederationclient.py | 52 +++++++++++++++++++++++++++++----- 1 file changed, 45 insertions(+), 7 deletions(-) (limited to 'synapse') diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 6a1fc8ca55..f9a1fbf95d 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -280,7 +280,10 @@ class MatrixFederationHttpClient(object): # :'( # Update transactions table? 
with logcontext.PreserveLoggingContext(): - body = yield treq.content(response) + body = yield self._timeout_deferred( + treq.content(response), + timeout, + ) raise HttpResponseException( response.code, response.phrase, body ) @@ -394,7 +397,10 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield treq.json_content(response) + body = yield self._timeout_deferred( + treq.json_content(response), + timeout, + ) defer.returnValue(body) @defer.inlineCallbacks @@ -444,7 +450,10 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield treq.json_content(response) + body = yield self._timeout_deferred( + treq.json_content(response), + timeout, + ) defer.returnValue(body) @@ -496,7 +505,10 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield treq.json_content(response) + body = yield self._timeout_deferred( + treq.json_content(response), + timeout, + ) defer.returnValue(body) @@ -543,7 +555,10 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield treq.json_content(response) + body = yield self._timeout_deferred( + treq.json_content(response), + timeout, + ) defer.returnValue(body) @@ -585,8 +600,10 @@ class MatrixFederationHttpClient(object): try: with logcontext.PreserveLoggingContext(): - length = yield _readBodyToFile( - response, output_stream, max_size + length = yield self._timeout_deferred( + _readBodyToFile( + response, output_stream, max_size + ), ) except Exception: logger.exception("Failed to download body") @@ -594,6 +611,27 @@ class MatrixFederationHttpClient(object): defer.returnValue((length, headers)) + def _timeout_deferred(self, deferred, timeout_ms=None): + """Times the deferred out after `timeout_ms` ms + + Args: + deferred (Deferred) + timeout_ms (int|None): Timeout in milliseconds. If None defaults + to 60 seconds. + + Returns: + Deferred + """ + + add_timeout_to_deferred( + deferred, + timeout_ms / 1000. if timeout_ms else 60, + self.hs.get_reactor(), + cancelled_to_request_timed_out_error, + ) + + return deferred + class _ReadBodyToFileProtocol(protocol.Protocol): def __init__(self, stream, deferred, max_size): -- cgit 1.5.1 From 8fd93b5eeaeddce16e0b510741dc5d4768cbc78d Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 12 Sep 2018 20:16:31 +1000 Subject: Port crypto/ to Python 3 (#3822) --- changelog.d/3822.misc | 1 + synapse/crypto/context_factory.py | 2 +- synapse/crypto/keyclient.py | 8 +++++++- synapse/crypto/keyring.py | 9 +++++---- 4 files changed, 14 insertions(+), 6 deletions(-) create mode 100644 changelog.d/3822.misc (limited to 'synapse') diff --git a/changelog.d/3822.misc b/changelog.d/3822.misc new file mode 100644 index 0000000000..5250f31896 --- /dev/null +++ b/changelog.d/3822.misc @@ -0,0 +1 @@ +crypto/ is now ported to Python 3. 
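A note on the recurring pattern in this port, and in the VOIP fix further up: the Python 3 bytes/str split. Twisted's wire-facing APIs want bytes while most of Synapse's internals carry text, so each hunk below either encodes str at the boundary or decodes bytes on the way in. The following is a minimal, self-contained sketch of that discipline; the ensure_bytes helper and the sample values are illustrative and do not come from the Synapse tree.

# Sketch only: normalise text to bytes before handing it to APIs that,
# under Python 3, strictly require bytes (Twisted transports, hmac, ...).
import hashlib
import hmac


def ensure_bytes(value, encoding="ascii"):
    # Hypothetical helper, not a Synapse function: pass bytes through,
    # encode str with the given codec.
    if isinstance(value, bytes):
        return value
    return value.encode(encoding)


turn_secret = "shared-secret"               # illustrative values
username = "1536755000:@alice:example.com"

# Under Python 2, hmac.new(turn_secret, msg=username) happened to work
# because str *was* bytes; under Python 3 both the key and the message
# must be bytes, which is exactly what the VOIP fix above adds.
mac = hmac.new(
    ensure_bytes(turn_secret),
    msg=ensure_bytes(username, "utf-8"),
    digestmod=hashlib.sha1,
)
print(mac.hexdigest())
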
diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index 1a391adec1..02b76dfcfb 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -123,6 +123,6 @@ class ClientTLSOptionsFactory(object): def get_options(self, host): return ClientTLSOptions( - host.decode('utf-8'), + host, CertificateOptions(verify=False).getContext() ) diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py index e94400b8e2..57d4665e84 100644 --- a/synapse/crypto/keyclient.py +++ b/synapse/crypto/keyclient.py @@ -50,7 +50,7 @@ def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1): defer.returnValue((server_response, server_certificate)) except SynapseKeyClientError as e: logger.warn("Error getting key for %r: %s", server_name, e) - if e.status.startswith("4"): + if e.status.startswith(b"4"): # Don't retry for 4xx responses. raise IOError("Cannot get key for %r" % server_name) except (ConnectError, DomainError) as e: @@ -82,6 +82,12 @@ class SynapseKeyClientProtocol(HTTPClient): self._peer = self.transport.getPeer() logger.debug("Connected to %s", self._peer) + if not isinstance(self.path, bytes): + self.path = self.path.encode('ascii') + + if not isinstance(self.host, bytes): + self.host = self.host.encode('ascii') + self.sendCommand(b"GET", self.path) if self.host: self.sendHeader(b"Host", self.host) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 30e2742102..9d497abf17 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -16,9 +16,10 @@ import hashlib import logging -import urllib from collections import namedtuple +from six.moves import urllib + from signedjson.key import ( decode_verify_key_bytes, encode_verify_key_base64, @@ -432,7 +433,7 @@ class Keyring(object): # an incoming request. 
query_response = yield self.client.post_json( destination=perspective_name, - path=b"/_matrix/key/v2/query", + path="/_matrix/key/v2/query", data={ u"server_keys": { server_name: { @@ -513,8 +514,8 @@ class Keyring(object): (response, tls_certificate) = yield fetch_server_key( server_name, self.hs.tls_client_options_factory, - path=(b"/_matrix/key/v2/server/%s" % ( - urllib.quote(requested_key_id), + path=("/_matrix/key/v2/server/%s" % ( + urllib.parse.quote(requested_key_id), )).encode("ascii"), ) -- cgit 1.5.1 From 02aa41809bd48cc1dd02da3634f02f5eae71b41c Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 12 Sep 2018 20:41:31 +1000 Subject: Port rest/ to Python 3 (#3823) --- changelog.d/3823.misc | 1 + synapse/rest/client/v1/admin.py | 9 ++++-- synapse/rest/client/v1/events.py | 12 ++++---- synapse/rest/client/v1/initial_sync.py | 2 +- synapse/rest/client/v1/login.py | 44 +++++++++++++-------------- synapse/rest/client/v1/push_rule.py | 24 +++++++-------- synapse/rest/client/v1/pusher.py | 4 +-- synapse/rest/client/v1/room.py | 14 +++++---- synapse/rest/client/v2_alpha/sync.py | 2 +- synapse/rest/client/v2_alpha/thirdparty.py | 4 +-- synapse/rest/key/v1/server_key_resource.py | 2 +- synapse/rest/key/v2/__init__.py | 4 +-- synapse/rest/key/v2/remote_key_resource.py | 6 ++-- synapse/rest/media/v0/content_repository.py | 4 +-- synapse/rest/media/v1/_base.py | 30 +++++++++--------- synapse/rest/media/v1/download_resource.py | 12 ++++---- synapse/rest/media/v1/media_repository.py | 29 ++++++++++-------- synapse/rest/media/v1/preview_url_resource.py | 10 +++--- 18 files changed, 113 insertions(+), 100 deletions(-) create mode 100644 changelog.d/3823.misc (limited to 'synapse') diff --git a/changelog.d/3823.misc b/changelog.d/3823.misc new file mode 100644 index 0000000000..0da491ddaa --- /dev/null +++ b/changelog.d/3823.misc @@ -0,0 +1 @@ +rest/ is now ported to Python 3. 
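Most hunks in this port are variations on one theme: on Python 3, Twisted's Request.args maps bytes keys to lists of bytes values, so every lookup needs a b"..." key and every extracted value an explicit decode. Here is a minimal sketch of that access pattern, with a plain dict standing in for the real request object; the sample keys and values are illustrative.

# Sketch only: "args" mimics twisted.web's Request.args on Python 3.
args = {b"room_id": [b"!abc:example.com"], b"timeout": [b"30000"]}

room_id = None
if b"room_id" in args:
    # Values arrive as bytes and must be decoded before use as str.
    room_id = args[b"room_id"][0].decode("ascii")

timeout = 30000  # default long-poll time, in milliseconds
if b"timeout" in args:
    try:
        # int() accepts ASCII bytes directly, which is what the events
        # servlet below relies on.
        timeout = int(args[b"timeout"][0])
    except ValueError:
        # (The real servlets raise SynapseError(400, ...) here instead.)
        raise ValueError("timeout must be in milliseconds.")
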
diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py index ad536ab570..41534b8c2a 100644 --- a/synapse/rest/client/v1/admin.py +++ b/synapse/rest/client/v1/admin.py @@ -101,7 +101,7 @@ class UserRegisterServlet(ClientV1RestServlet): nonce = self.hs.get_secrets().token_hex(64) self.nonces[nonce] = int(self.reactor.seconds()) - return (200, {"nonce": nonce.encode('ascii')}) + return (200, {"nonce": nonce}) @defer.inlineCallbacks def on_POST(self, request): @@ -164,7 +164,7 @@ class UserRegisterServlet(ClientV1RestServlet): key=self.hs.config.registration_shared_secret.encode(), digestmod=hashlib.sha1, ) - want_mac.update(nonce) + want_mac.update(nonce.encode('utf8')) want_mac.update(b"\x00") want_mac.update(username) want_mac.update(b"\x00") @@ -173,7 +173,10 @@ class UserRegisterServlet(ClientV1RestServlet): want_mac.update(b"admin" if admin else b"notadmin") want_mac = want_mac.hexdigest() - if not hmac.compare_digest(want_mac, got_mac.encode('ascii')): + if not hmac.compare_digest( + want_mac.encode('ascii'), + got_mac.encode('ascii') + ): raise SynapseError(403, "HMAC incorrect") # Reuse the parts of RegisterRestServlet to reduce code duplication diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index 0f3a2e8b51..cd9b3bdbd1 100644 --- a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -45,20 +45,20 @@ class EventStreamRestServlet(ClientV1RestServlet): is_guest = requester.is_guest room_id = None if is_guest: - if "room_id" not in request.args: + if b"room_id" not in request.args: raise SynapseError(400, "Guest users must specify room_id param") - if "room_id" in request.args: - room_id = request.args["room_id"][0] + if b"room_id" in request.args: + room_id = request.args[b"room_id"][0].decode('ascii') pagin_config = PaginationConfig.from_request(request) timeout = EventStreamRestServlet.DEFAULT_LONGPOLL_TIME_MS - if "timeout" in request.args: + if b"timeout" in request.args: try: - timeout = int(request.args["timeout"][0]) + timeout = int(request.args[b"timeout"][0]) except ValueError: raise SynapseError(400, "timeout must be in milliseconds.") - as_client_event = "raw" not in request.args + as_client_event = b"raw" not in request.args chunk = yield self.event_stream_handler.get_stream( requester.user.to_string(), diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py index fd5f85b53e..3ead75cb77 100644 --- a/synapse/rest/client/v1/initial_sync.py +++ b/synapse/rest/client/v1/initial_sync.py @@ -32,7 +32,7 @@ class InitialSyncRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request): requester = yield self.auth.get_user_by_req(request) - as_client_event = "raw" not in request.args + as_client_event = b"raw" not in request.args pagination_config = PaginationConfig.from_request(request) include_archived = parse_boolean(request, "archived", default=False) content = yield self.initial_sync_handler.snapshot_all_rooms( diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index cb85fa1436..0010699d31 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -14,10 +14,9 @@ # limitations under the License. 
import logging -import urllib import xml.etree.ElementTree as ET -from six.moves.urllib import parse as urlparse +from six.moves import urllib from canonicaljson import json from saml2 import BINDING_HTTP_POST, config @@ -134,7 +133,7 @@ class LoginRestServlet(ClientV1RestServlet): LoginRestServlet.SAML2_TYPE): relay_state = "" if "relay_state" in login_submission: - relay_state = "&RelayState=" + urllib.quote( + relay_state = "&RelayState=" + urllib.parse.quote( login_submission["relay_state"]) result = { "uri": "%s%s" % (self.idp_redirect_url, relay_state) @@ -366,7 +365,7 @@ class SAML2RestServlet(ClientV1RestServlet): (user_id, token) = yield handler.register_saml2(username) # Forward to the RelayState callback along with ava if 'RelayState' in request.args: - request.redirect(urllib.unquote( + request.redirect(urllib.parse.unquote( request.args['RelayState'][0]) + '?status=authenticated&access_token=' + token + '&user_id=' + user_id + '&ava=' + @@ -377,7 +376,7 @@ class SAML2RestServlet(ClientV1RestServlet): "user_id": user_id, "token": token, "ava": saml2_auth.ava})) elif 'RelayState' in request.args: - request.redirect(urllib.unquote( + request.redirect(urllib.parse.unquote( request.args['RelayState'][0]) + '?status=not_authenticated') finish_request(request) @@ -390,21 +389,22 @@ class CasRedirectServlet(ClientV1RestServlet): def __init__(self, hs): super(CasRedirectServlet, self).__init__(hs) - self.cas_server_url = hs.config.cas_server_url - self.cas_service_url = hs.config.cas_service_url + self.cas_server_url = hs.config.cas_server_url.encode('ascii') + self.cas_service_url = hs.config.cas_service_url.encode('ascii') def on_GET(self, request): args = request.args - if "redirectUrl" not in args: + if b"redirectUrl" not in args: return (400, "Redirect URL not specified for CAS auth") - client_redirect_url_param = urllib.urlencode({ - "redirectUrl": args["redirectUrl"][0] - }) - hs_redirect_url = self.cas_service_url + "/_matrix/client/api/v1/login/cas/ticket" - service_param = urllib.urlencode({ - "service": "%s?%s" % (hs_redirect_url, client_redirect_url_param) - }) - request.redirect("%s/login?%s" % (self.cas_server_url, service_param)) + client_redirect_url_param = urllib.parse.urlencode({ + b"redirectUrl": args[b"redirectUrl"][0] + }).encode('ascii') + hs_redirect_url = (self.cas_service_url + + b"/_matrix/client/api/v1/login/cas/ticket") + service_param = urllib.parse.urlencode({ + b"service": b"%s?%s" % (hs_redirect_url, client_redirect_url_param) + }).encode('ascii') + request.redirect(b"%s/login?%s" % (self.cas_server_url, service_param)) finish_request(request) @@ -422,11 +422,11 @@ class CasTicketServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request): - client_redirect_url = request.args["redirectUrl"][0] + client_redirect_url = request.args[b"redirectUrl"][0] http_client = self.hs.get_simple_http_client() uri = self.cas_server_url + "/proxyValidate" args = { - "ticket": request.args["ticket"], + "ticket": request.args[b"ticket"][0].decode('ascii'), "service": self.cas_service_url } try: @@ -471,11 +471,11 @@ class CasTicketServlet(ClientV1RestServlet): finish_request(request) def add_login_token_to_redirect_url(self, url, token): - url_parts = list(urlparse.urlparse(url)) - query = dict(urlparse.parse_qsl(url_parts[4])) + url_parts = list(urllib.parse.urlparse(url)) + query = dict(urllib.parse.parse_qsl(url_parts[4])) query.update({"loginToken": token}) - url_parts[4] = urllib.urlencode(query) - return urlparse.urlunparse(url_parts) + 
url_parts[4] = urllib.parse.urlencode(query).encode('ascii') + return urllib.parse.urlunparse(url_parts) def parse_cas_response(self, cas_response_body): user = None diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 6e95d9bec2..9382b1f124 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -46,7 +46,7 @@ class PushRuleRestServlet(ClientV1RestServlet): try: priority_class = _priority_class_from_spec(spec) except InvalidRuleException as e: - raise SynapseError(400, e.message) + raise SynapseError(400, str(e)) requester = yield self.auth.get_user_by_req(request) @@ -73,7 +73,7 @@ class PushRuleRestServlet(ClientV1RestServlet): content, ) except InvalidRuleException as e: - raise SynapseError(400, e.message) + raise SynapseError(400, str(e)) before = parse_string(request, "before") if before: @@ -95,9 +95,9 @@ class PushRuleRestServlet(ClientV1RestServlet): ) self.notify_user(user_id) except InconsistentRuleException as e: - raise SynapseError(400, e.message) + raise SynapseError(400, str(e)) except RuleNotFoundException as e: - raise SynapseError(400, e.message) + raise SynapseError(400, str(e)) defer.returnValue((200, {})) @@ -142,10 +142,10 @@ class PushRuleRestServlet(ClientV1RestServlet): PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR ) - if path[0] == '': + if path[0] == b'': defer.returnValue((200, rules)) - elif path[0] == 'global': - path = path[1:] + elif path[0] == b'global': + path = [x.decode('ascii') for x in path[1:]] result = _filter_ruleset_with_path(rules['global'], path) defer.returnValue((200, result)) else: @@ -192,10 +192,10 @@ class PushRuleRestServlet(ClientV1RestServlet): def _rule_spec_from_path(path): if len(path) < 2: raise UnrecognizedRequestError() - if path[0] != 'pushrules': + if path[0] != b'pushrules': raise UnrecognizedRequestError() - scope = path[1] + scope = path[1].decode('ascii') path = path[2:] if scope != 'global': raise UnrecognizedRequestError() @@ -203,13 +203,13 @@ def _rule_spec_from_path(path): if len(path) == 0: raise UnrecognizedRequestError() - template = path[0] + template = path[0].decode('ascii') path = path[1:] if len(path) == 0 or len(path[0]) == 0: raise UnrecognizedRequestError() - rule_id = path[0] + rule_id = path[0].decode('ascii') spec = { 'scope': scope, @@ -220,7 +220,7 @@ def _rule_spec_from_path(path): path = path[1:] if len(path) > 0 and len(path[0]) > 0: - spec['attr'] = path[0] + spec['attr'] = path[0].decode('ascii') return spec diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index 182a68b1e2..b84f0260f2 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -59,7 +59,7 @@ class PushersRestServlet(ClientV1RestServlet): ] for p in pushers: - for k, v in p.items(): + for k, v in list(p.items()): if k not in allowed_keys: del p[k] @@ -126,7 +126,7 @@ class PushersSetRestServlet(ClientV1RestServlet): profile_tag=content.get('profile_tag', ""), ) except PusherConfigException as pce: - raise SynapseError(400, "Config Error: " + pce.message, + raise SynapseError(400, "Config Error: " + str(pce), errcode=Codes.MISSING_PARAM) self.notifier.on_new_replication_data() diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 976d98387d..663934efd0 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -207,7 +207,7 @@ class RoomSendEventRestServlet(ClientV1RestServlet): "sender": requester.user.to_string(), } - if 'ts' in 
request.args and requester.app_service: + if b'ts' in request.args and requester.app_service: event_dict['origin_server_ts'] = parse_integer(request, "ts", 0) event = yield self.event_creation_hander.create_and_send_nonmember_event( @@ -255,7 +255,9 @@ class JoinRoomAliasServlet(ClientV1RestServlet): if RoomID.is_valid(room_identifier): room_id = room_identifier try: - remote_room_hosts = request.args["server_name"] + remote_room_hosts = [ + x.decode('ascii') for x in request.args[b"server_name"] + ] except Exception: remote_room_hosts = None elif RoomAlias.is_valid(room_identifier): @@ -461,10 +463,10 @@ class RoomMessageListRestServlet(ClientV1RestServlet): pagination_config = PaginationConfig.from_request( request, default_limit=10, ) - as_client_event = "raw" not in request.args - filter_bytes = parse_string(request, "filter") + as_client_event = b"raw" not in request.args + filter_bytes = parse_string(request, b"filter", encoding=None) if filter_bytes: - filter_json = urlparse.unquote(filter_bytes).decode("UTF-8") + filter_json = urlparse.unquote(filter_bytes.decode("UTF-8")) event_filter = Filter(json.loads(filter_json)) else: event_filter = None @@ -560,7 +562,7 @@ class RoomEventContextServlet(ClientV1RestServlet): # picking the API shape for symmetry with /messages filter_bytes = parse_string(request, "filter") if filter_bytes: - filter_json = urlparse.unquote(filter_bytes).decode("UTF-8") + filter_json = urlparse.unquote(filter_bytes) event_filter = Filter(json.loads(filter_json)) else: event_filter = None diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 263d8eb73e..0251146722 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -89,7 +89,7 @@ class SyncRestServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request): - if "from" in request.args: + if b"from" in request.args: # /events used to use 'from', but /sync uses 'since'. # Lets be helpful and whine if we see a 'from'. 
raise SynapseError( diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py index d9d379182e..b9b5d07677 100644 --- a/synapse/rest/client/v2_alpha/thirdparty.py +++ b/synapse/rest/client/v2_alpha/thirdparty.py @@ -79,7 +79,7 @@ class ThirdPartyUserServlet(RestServlet): yield self.auth.get_user_by_req(request, allow_guest=True) fields = request.args - fields.pop("access_token", None) + fields.pop(b"access_token", None) results = yield self.appservice_handler.query_3pe( ThirdPartyEntityKind.USER, protocol, fields @@ -102,7 +102,7 @@ class ThirdPartyLocationServlet(RestServlet): yield self.auth.get_user_by_req(request, allow_guest=True) fields = request.args - fields.pop("access_token", None) + fields.pop(b"access_token", None) results = yield self.appservice_handler.query_3pe( ThirdPartyEntityKind.LOCATION, protocol, fields diff --git a/synapse/rest/key/v1/server_key_resource.py b/synapse/rest/key/v1/server_key_resource.py index b9ee6e1c13..38eb2ee23f 100644 --- a/synapse/rest/key/v1/server_key_resource.py +++ b/synapse/rest/key/v1/server_key_resource.py @@ -88,5 +88,5 @@ class LocalKey(Resource): ) def getChild(self, name, request): - if name == '': + if name == b'': return self diff --git a/synapse/rest/key/v2/__init__.py b/synapse/rest/key/v2/__init__.py index 3491fd2118..cb5abcf826 100644 --- a/synapse/rest/key/v2/__init__.py +++ b/synapse/rest/key/v2/__init__.py @@ -22,5 +22,5 @@ from .remote_key_resource import RemoteKey class KeyApiV2Resource(Resource): def __init__(self, hs): Resource.__init__(self) - self.putChild("server", LocalKey(hs)) - self.putChild("query", RemoteKey(hs)) + self.putChild(b"server", LocalKey(hs)) + self.putChild(b"query", RemoteKey(hs)) diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 7d67e4b064..eb8782aa6e 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -103,7 +103,7 @@ class RemoteKey(Resource): def async_render_GET(self, request): if len(request.postpath) == 1: server, = request.postpath - query = {server: {}} + query = {server.decode('ascii'): {}} elif len(request.postpath) == 2: server, key_id = request.postpath minimum_valid_until_ts = parse_integer( @@ -112,11 +112,12 @@ class RemoteKey(Resource): arguments = {} if minimum_valid_until_ts is not None: arguments["minimum_valid_until_ts"] = minimum_valid_until_ts - query = {server: {key_id: arguments}} + query = {server.decode('ascii'): {key_id.decode('ascii'): arguments}} else: raise SynapseError( 404, "Not found %r" % request.postpath, Codes.NOT_FOUND ) + yield self.query_keys(request, query, query_remote_on_cache_miss=True) def render_POST(self, request): @@ -135,6 +136,7 @@ class RemoteKey(Resource): @defer.inlineCallbacks def query_keys(self, request, query, query_remote_on_cache_miss=False): logger.info("Handling query for keys %r", query) + store_queries = [] for server_name, key_ids in query.items(): if ( diff --git a/synapse/rest/media/v0/content_repository.py b/synapse/rest/media/v0/content_repository.py index f255f2883f..5a426ff2f6 100644 --- a/synapse/rest/media/v0/content_repository.py +++ b/synapse/rest/media/v0/content_repository.py @@ -56,7 +56,7 @@ class ContentRepoResource(resource.Resource): # servers. # TODO: A little crude here, we could do this better. 
- filename = request.path.split('/')[-1] + filename = request.path.decode('ascii').split('/')[-1] # be paranoid filename = re.sub("[^0-9A-z.-_]", "", filename) @@ -78,7 +78,7 @@ class ContentRepoResource(resource.Resource): # select private. don't bother setting Expires as all our matrix # clients are smart enough to be happy with Cache-Control (right?) request.setHeader( - "Cache-Control", "public,max-age=86400,s-maxage=86400" + b"Cache-Control", b"public,max-age=86400,s-maxage=86400" ) d = FileSender().beginFileTransfer(f, request) diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index 65f4bd2910..76e479afa3 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -15,9 +15,8 @@ import logging import os -import urllib -from six.moves.urllib import parse as urlparse +from six.moves import urllib from twisted.internet import defer from twisted.protocols.basic import FileSender @@ -35,10 +34,15 @@ def parse_media_id(request): # This allows users to append e.g. /test.png to the URL. Useful for # clients that parse the URL to see content type. server_name, media_id = request.postpath[:2] + + if isinstance(server_name, bytes): + server_name = server_name.decode('utf-8') + media_id = media_id.decode('utf8') + file_name = None if len(request.postpath) > 2: try: - file_name = urlparse.unquote(request.postpath[-1]).decode("utf-8") + file_name = urllib.parse.unquote(request.postpath[-1].decode("utf-8")) except UnicodeDecodeError: pass return server_name, media_id, file_name @@ -93,22 +97,18 @@ def add_file_headers(request, media_type, file_size, upload_name): file_size (int): Size in bytes of the media, if known. upload_name (str): The name of the requested file, if any. """ + def _quote(x): + return urllib.parse.quote(x.encode("utf-8")) + request.setHeader(b"Content-Type", media_type.encode("UTF-8")) if upload_name: if is_ascii(upload_name): - request.setHeader( - b"Content-Disposition", - b"inline; filename=%s" % ( - urllib.quote(upload_name.encode("utf-8")), - ), - ) + disposition = ("inline; filename=%s" % (_quote(upload_name),)).encode("ascii") else: - request.setHeader( - b"Content-Disposition", - b"inline; filename*=utf-8''%s" % ( - urllib.quote(upload_name.encode("utf-8")), - ), - ) + disposition = ( + "inline; filename*=utf-8''%s" % (_quote(upload_name),)).encode("ascii") + + request.setHeader(b"Content-Disposition", disposition) # cache for at least a day. 
# XXX: we might want to turn this off for data we don't want to diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py index fbfa85f74f..ca90964d1d 100644 --- a/synapse/rest/media/v1/download_resource.py +++ b/synapse/rest/media/v1/download_resource.py @@ -47,12 +47,12 @@ class DownloadResource(Resource): def _async_render_GET(self, request): set_cors_headers(request) request.setHeader( - "Content-Security-Policy", - "default-src 'none';" - " script-src 'none';" - " plugin-types application/pdf;" - " style-src 'unsafe-inline';" - " object-src 'self';" + b"Content-Security-Policy", + b"default-src 'none';" + b" script-src 'none';" + b" plugin-types application/pdf;" + b" style-src 'unsafe-inline';" + b" object-src 'self';" ) server_name, media_id, name = parse_media_id(request) if server_name == self.server_name: diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 241c972070..a828ff4438 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -20,7 +20,7 @@ import logging import os import shutil -from six import iteritems +from six import PY3, iteritems from six.moves.urllib import parse as urlparse import twisted.internet.error @@ -397,13 +397,13 @@ class MediaRepository(object): yield finish() - media_type = headers["Content-Type"][0] + media_type = headers[b"Content-Type"][0].decode('ascii') time_now_ms = self.clock.time_msec() - content_disposition = headers.get("Content-Disposition", None) + content_disposition = headers.get(b"Content-Disposition", None) if content_disposition: - _, params = cgi.parse_header(content_disposition[0],) + _, params = cgi.parse_header(content_disposition[0].decode('ascii'),) upload_name = None # First check if there is a valid UTF-8 filename @@ -419,9 +419,13 @@ class MediaRepository(object): upload_name = upload_name_ascii if upload_name: - upload_name = urlparse.unquote(upload_name) + if PY3: + upload_name = urlparse.unquote(upload_name) + else: + upload_name = urlparse.unquote(upload_name.encode('ascii')) try: - upload_name = upload_name.decode("utf-8") + if isinstance(upload_name, bytes): + upload_name = upload_name.decode("utf-8") except UnicodeDecodeError: upload_name = None else: @@ -755,14 +759,15 @@ class MediaRepositoryResource(Resource): Resource.__init__(self) media_repo = hs.get_media_repository() - self.putChild("upload", UploadResource(hs, media_repo)) - self.putChild("download", DownloadResource(hs, media_repo)) - self.putChild("thumbnail", ThumbnailResource( + + self.putChild(b"upload", UploadResource(hs, media_repo)) + self.putChild(b"download", DownloadResource(hs, media_repo)) + self.putChild(b"thumbnail", ThumbnailResource( hs, media_repo, media_repo.media_storage, )) - self.putChild("identicon", IdenticonResource()) + self.putChild(b"identicon", IdenticonResource()) if hs.config.url_preview_enabled: - self.putChild("preview_url", PreviewUrlResource( + self.putChild(b"preview_url", PreviewUrlResource( hs, media_repo, media_repo.media_storage, )) - self.putChild("config", MediaConfigResource(hs)) + self.putChild(b"config", MediaConfigResource(hs)) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 778ef97337..cad2dec33a 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -261,7 +261,7 @@ class PreviewUrlResource(Resource): logger.debug("Calculated OG for %s as %s" % 
(url, og)) - jsonog = json.dumps(og) + jsonog = json.dumps(og).encode('utf8') # store OG in history-aware DB cache yield self.store.store_url_cache( @@ -301,20 +301,20 @@ class PreviewUrlResource(Resource): logger.warn("Error downloading %s: %r", url, e) raise SynapseError( 500, "Failed to download content: %s" % ( - traceback.format_exception_only(sys.exc_type, e), + traceback.format_exception_only(sys.exc_info()[0], e), ), Codes.UNKNOWN, ) yield finish() try: - if "Content-Type" in headers: - media_type = headers["Content-Type"][0] + if b"Content-Type" in headers: + media_type = headers[b"Content-Type"][0].decode('ascii') else: media_type = "application/octet-stream" time_now_ms = self.clock.time_msec() - content_disposition = headers.get("Content-Disposition", None) + content_disposition = headers.get(b"Content-Disposition", None) if content_disposition: _, params = cgi.parse_header(content_disposition[0],) download_name = None -- cgit 1.5.1 From 0ddf48672421aa64920342b959dc77432c869889 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 12 Sep 2018 11:58:52 +0100 Subject: expose number of real reserved users --- synapse/app/homeserver.py | 10 +++++++--- synapse/storage/monthly_active_users.py | 17 ++++++++++++++++ tests/storage/test_monthly_active_users.py | 31 ++++++++++++++++++++++++++++++ 3 files changed, 55 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 3eb5b663de..e6372bcec2 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -307,6 +307,7 @@ class SynapseHomeServer(HomeServer): # Gauges to expose monthly active user control metrics current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU") max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit") +reserved_mau_gauge = Gauge("synapse_admin_mau:reserved", "Reserved real MAU users") def setup(config_options): @@ -531,10 +532,13 @@ def run(hs): @defer.inlineCallbacks def generate_monthly_active_users(): - count = 0 + current_mau_count = 0 + reserved_mau_count = 0 if hs.config.limit_usage_by_mau: - count = yield hs.get_datastore().get_monthly_active_count() - current_mau_gauge.set(float(count)) + current_mau_count = yield hs.get_datastore().get_monthly_active_count() + reserved_mau_count = yield hs.get_datastore().get_reserved_real_user_account() + current_mau_gauge.set(float(current_mau_count)) + reserved_mau_gauge.set(float(reserved_mau_count)) max_mau_gauge.set(float(hs.config.max_mau_value)) hs.get_datastore().initialise_reserved_users( diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index b890c152db..53d125d305 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -146,6 +146,23 @@ class MonthlyActiveUsersStore(SQLBaseStore): return count return self.runInteraction("count_users", _count_users) + @defer.inlineCallbacks + def get_reserved_real_user_account(self): + """Of the reserved threepids defined in config, how many are associated + with registered users? 
+ + Returns: + Defered[int]: Number of real reserved users + """ + count = 0 + for tp in self.hs.config.mau_limits_reserved_threepids: + user_id = yield self.hs.get_datastore().get_user_id_by_threepid( + tp["medium"], tp["address"] + ) + if user_id: + count = count + 1 + defer.returnValue(count) + @defer.inlineCallbacks def upsert_monthly_active_user(self, user_id): """ diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index ccfc21b7fc..662f2ed845 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -183,3 +183,34 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): self.store.populate_monthly_active_users('user_id') self.pump() self.store.upsert_monthly_active_user.assert_not_called() + + def test_get_reserved_real_user_account(self): + # Test no reserved users, or reserved threepids + count = self.store.get_reserved_real_user_account() + self.assertEquals(self.get_success(count), 0) + # Test reserved users but no registered users + + user1 = '@user1:example.com' + user2 = '@user2:example.com' + user1_email = 'user1@example.com' + user2_email = 'user2@example.com' + threepids = [ + {'medium': 'email', 'address': user1_email}, + {'medium': 'email', 'address': user2_email}, + ] + self.hs.config.mau_limits_reserved_threepids = threepids + self.store.initialise_reserved_users(threepids) + self.pump() + count = self.store.get_reserved_real_user_account() + self.assertEquals(self.get_success(count), 0) + + # Test reserved registed users + self.store.register(user_id=user1, token="123", password_hash=None) + self.store.register(user_id=user2, token="456", password_hash=None) + self.pump() + + now = int(self.hs.get_clock().time_msec()) + self.store.user_add_threepid(user1, "email", user1_email, now, now) + self.store.user_add_threepid(user2, "email", user2_email, now, now) + count = self.store.get_reserved_real_user_account() + self.assertEquals(self.get_success(count), len(threepids)) -- cgit 1.5.1 From 7ca097f77eba19403b4169e23404678aa2c7cd91 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 12 Sep 2018 23:23:32 +1000 Subject: Port federation/ to py3 (#3847) --- changelog.d/3847.misc | 1 + synapse/federation/federation_client.py | 8 ++++---- synapse/federation/transport/client.py | 5 +++-- synapse/federation/transport/server.py | 24 +++++++++++------------- 4 files changed, 19 insertions(+), 19 deletions(-) create mode 100644 changelog.d/3847.misc (limited to 'synapse') diff --git a/changelog.d/3847.misc b/changelog.d/3847.misc new file mode 100644 index 0000000000..bf8b5afea4 --- /dev/null +++ b/changelog.d/3847.misc @@ -0,0 +1 @@ +federation/ is now ported to Python 3. 
\ No newline at end of file diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index c9f3c2d352..fe67b2ff42 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -271,10 +271,10 @@ class FederationClient(FederationBase): event_id, destination, e, ) except NotRetryingDestination as e: - logger.info(e.message) + logger.info(str(e)) continue except FederationDeniedError as e: - logger.info(e.message) + logger.info(str(e)) continue except Exception as e: pdu_attempts[destination] = now @@ -510,7 +510,7 @@ class FederationClient(FederationBase): else: logger.warn( "Failed to %s via %s: %i %s", - description, destination, e.code, e.message, + description, destination, e.code, e.args[0], ) except Exception: logger.warn( @@ -875,7 +875,7 @@ class FederationClient(FederationBase): except Exception as e: logger.exception( "Failed to send_third_party_invite via %s: %s", - destination, e.message + destination, str(e) ) raise RuntimeError("Failed to send to any server.") diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 1054441ca5..2ab973d6c8 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -15,7 +15,8 @@ # limitations under the License. import logging -import urllib + +from six.moves import urllib from twisted.internet import defer @@ -951,4 +952,4 @@ def _create_path(prefix, path, *args): Returns: str """ - return prefix + path % tuple(urllib.quote(arg, "") for arg in args) + return prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 3972922ff9..2f874b4838 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -90,8 +90,8 @@ class Authenticator(object): @defer.inlineCallbacks def authenticate_request(self, request, content): json_request = { - "method": request.method, - "uri": request.uri, + "method": request.method.decode('ascii'), + "uri": request.uri.decode('ascii'), "destination": self.server_name, "signatures": {}, } @@ -252,7 +252,7 @@ class BaseFederationServlet(object): by the callback method. None if the request has already been handled. """ content = None - if request.method in ["PUT", "POST"]: + if request.method in [b"PUT", b"POST"]: # TODO: Handle other method types? other content types? 
content = parse_json_object_from_request(request) @@ -386,7 +386,7 @@ class FederationStateServlet(BaseFederationServlet): return self.handler.on_context_state_request( origin, context, - query.get("event_id", [None])[0], + parse_string_from_args(query, "event_id", None), ) @@ -397,7 +397,7 @@ class FederationStateIdsServlet(BaseFederationServlet): return self.handler.on_state_ids_request( origin, room_id, - query.get("event_id", [None])[0], + parse_string_from_args(query, "event_id", None), ) @@ -405,14 +405,12 @@ class FederationBackfillServlet(BaseFederationServlet): PATH = "/backfill/(?P[^/]*)/" def on_GET(self, origin, content, query, context): - versions = query["v"] - limits = query["limit"] + versions = [x.decode('ascii') for x in query[b"v"]] + limit = parse_integer_from_args(query, "limit", None) - if not limits: + if not limit: return defer.succeed((400, {"error": "Did not include limit param"})) - limit = int(limits[-1]) - return self.handler.on_backfill_request(origin, context, versions, limit) @@ -423,7 +421,7 @@ class FederationQueryServlet(BaseFederationServlet): def on_GET(self, origin, content, query, query_type): return self.handler.on_query_request( query_type, - {k: v[0].decode("utf-8") for k, v in query.items()} + {k.decode('utf8'): v[0].decode("utf-8") for k, v in query.items()} ) @@ -630,14 +628,14 @@ class OpenIdUserInfo(BaseFederationServlet): @defer.inlineCallbacks def on_GET(self, origin, content, query): - token = query.get("access_token", [None])[0] + token = query.get(b"access_token", [None])[0] if token is None: defer.returnValue((401, { "errcode": "M_MISSING_TOKEN", "error": "Access Token required" })) return - user_id = yield self.handler.on_openid_userinfo(token) + user_id = yield self.handler.on_openid_userinfo(token.decode('ascii')) if user_id is None: defer.returnValue((401, { -- cgit 1.5.1 From 8c5b84441bdcc0f321ca9a58e8ec20cb35cdb456 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 12 Sep 2018 16:22:14 +0100 Subject: Log outbound requests when we retry --- synapse/http/matrixfederationclient.py | 149 ++++++++++++++++----------------- 1 file changed, 74 insertions(+), 75 deletions(-) (limited to 'synapse') diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index f9a1fbf95d..cf920bc041 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -177,11 +177,6 @@ class MatrixFederationHttpClient(object): txn_id = "%s-O-%s" % (method, self._next_id) self._next_id = (self._next_id + 1) % (MAXINT - 1) - outbound_logger.info( - "{%s} [%s] Sending request: %s %s", - txn_id, destination, method, url - ) - # XXX: Would be much nicer to retry only at the transaction-layer # (once we have reliable transactions in place) if long_retries: @@ -194,85 +189,89 @@ class MatrixFederationHttpClient(object): ).decode('ascii') log_result = None - try: - while True: - try: - if json_callback: - json = json_callback() - - if json: - data = encode_canonical_json(json) - headers_dict["Content-Type"] = ["application/json"] - self.sign_request( - destination, method, http_url, headers_dict, json - ) - else: - data = None - self.sign_request(destination, method, http_url, headers_dict) - - request_deferred = treq.request( - method, - url, - headers=Headers(headers_dict), - data=data, - agent=self.agent, - ) - add_timeout_to_deferred( - request_deferred, - timeout / 1000. 
if timeout else 60, - self.hs.get_reactor(), - cancelled_to_request_timed_out_error, - ) - response = yield make_deferred_yieldable( - request_deferred, + while True: + try: + if json_callback: + json = json_callback() + + if json: + data = encode_canonical_json(json) + headers_dict["Content-Type"] = ["application/json"] + self.sign_request( + destination, method, http_url, headers_dict, json ) + else: + data = None + self.sign_request(destination, method, http_url, headers_dict) - log_result = "%d %s" % (response.code, response.phrase,) - break - except Exception as e: - if not retry_on_dns_fail and isinstance(e, DNSLookupError): - logger.warn( - "DNS Lookup failed to %s with %s", - destination, - e - ) - log_result = "DNS Lookup failed to %s with %s" % ( - destination, e - ) - raise + outbound_logger.info( + "{%s} [%s] Sending request: %s %s", + txn_id, destination, method, url + ) + request_deferred = treq.request( + method, + url, + headers=Headers(headers_dict), + data=data, + agent=self.agent, + ) + add_timeout_to_deferred( + request_deferred, + timeout / 1000. if timeout else 60, + self.hs.get_reactor(), + cancelled_to_request_timed_out_error, + ) + response = yield make_deferred_yieldable( + request_deferred, + ) + + log_result = "%d %s" % (response.code, response.phrase,) + break + except Exception as e: + if not retry_on_dns_fail and isinstance(e, DNSLookupError): logger.warn( - "{%s} Sending request failed to %s: %s %s: %s", - txn_id, + "DNS Lookup failed to %s with %s", destination, - method, - url, - _flatten_response_never_received(e), + e ) + log_result = "DNS Lookup failed to %s with %s" % ( + destination, e + ) + raise + + logger.warn( + "{%s} Sending request failed to %s: %s %s: %s", + txn_id, + destination, + method, + url, + _flatten_response_never_received(e), + ) - log_result = _flatten_response_never_received(e) - - if retries_left and not timeout: - if long_retries: - delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left) - delay = min(delay, 60) - delay *= random.uniform(0.8, 1.4) - else: - delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left) - delay = min(delay, 2) - delay *= random.uniform(0.8, 1.4) + log_result = _flatten_response_never_received(e) - yield self.clock.sleep(delay) - retries_left -= 1 + if retries_left and not timeout: + if long_retries: + delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left) + delay = min(delay, 60) + delay *= random.uniform(0.8, 1.4) else: - raise - finally: - outbound_logger.info( - "{%s} [%s] Result: %s", - txn_id, - destination, - log_result, - ) + delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left) + delay = min(delay, 2) + delay *= random.uniform(0.8, 1.4) + + yield self.clock.sleep(delay) + retries_left -= 1 + else: + raise + finally: + outbound_logger.info( + "{%s} [%s] Result: %s", + txn_id, + destination, + log_result, + ) if 200 <= response.code < 300: pass -- cgit 1.5.1 From 8decd6233dabd87160794949ffd95282e60ab01e Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 12 Sep 2018 16:22:15 +0100 Subject: improve naming --- changelog.d/3846.feature | 2 +- synapse/app/homeserver.py | 14 +++++++++----- synapse/storage/monthly_active_users.py | 2 +- tests/storage/test_monthly_active_users.py | 6 +++--- 4 files changed, 14 insertions(+), 10 deletions(-) (limited to 'synapse') diff --git a/changelog.d/3846.feature b/changelog.d/3846.feature index dc9e9d86cc..453c11d3f8 100644 --- a/changelog.d/3846.feature +++ b/changelog.d/3846.feature @@ -1 +1 @@ -create synapse_admin_mau:reserved metric to expose number of real reaserved 
users +Add synapse_admin_mau:registered_reserved_users metric to expose number of real reaserved users diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index e6372bcec2..ac97e19649 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -307,7 +307,10 @@ class SynapseHomeServer(HomeServer): # Gauges to expose monthly active user control metrics current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU") max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit") -reserved_mau_gauge = Gauge("synapse_admin_mau:reserved", "Reserved real MAU users") +registered_reserved_users_mau_gauge = Gauge( + "synapse_admin_mau:registered_reserved_users", + "Registered users with reserved threepids" +) def setup(config_options): @@ -533,12 +536,13 @@ def run(hs): @defer.inlineCallbacks def generate_monthly_active_users(): current_mau_count = 0 - reserved_mau_count = 0 + reserved_count = 0 + store = hs.get_datastore() if hs.config.limit_usage_by_mau: - current_mau_count = yield hs.get_datastore().get_monthly_active_count() - reserved_mau_count = yield hs.get_datastore().get_reserved_real_user_account() + current_mau_count = yield store.get_monthly_active_count() + reserved_count = yield store.get_registered_reserved_users_count() current_mau_gauge.set(float(current_mau_count)) - reserved_mau_gauge.set(float(reserved_mau_count)) + registered_reserved_users_mau_gauge.set(float(reserved_count)) max_mau_gauge.set(float(hs.config.max_mau_value)) hs.get_datastore().initialise_reserved_users( diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index 53d125d305..59580949f1 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -147,7 +147,7 @@ class MonthlyActiveUsersStore(SQLBaseStore): return self.runInteraction("count_users", _count_users) @defer.inlineCallbacks - def get_reserved_real_user_account(self): + def get_registered_reserved_users_count(self): """Of the reserved threepids defined in config, how many are associated with registered users? 
diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index 662f2ed845..686f12a0dc 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -186,7 +186,7 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): def test_get_reserved_real_user_account(self): # Test no reserved users, or reserved threepids - count = self.store.get_reserved_real_user_account() + count = self.store.get_registered_reserved_users_count() self.assertEquals(self.get_success(count), 0) # Test reserved users but no registered users @@ -201,7 +201,7 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): self.hs.config.mau_limits_reserved_threepids = threepids self.store.initialise_reserved_users(threepids) self.pump() - count = self.store.get_reserved_real_user_account() + count = self.store.get_registered_reserved_users_count() self.assertEquals(self.get_success(count), 0) # Test reserved registed users @@ -212,5 +212,5 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): now = int(self.hs.get_clock().time_msec()) self.store.user_add_threepid(user1, "email", user1_email, now, now) self.store.user_add_threepid(user2, "email", user2_email, now, now) - count = self.store.get_reserved_real_user_account() + count = self.store.get_registered_reserved_users_count() self.assertEquals(self.get_success(count), len(threepids)) -- cgit 1.5.1 From 11bfc2af1cfdff08775160ea0c91c51326f1f665 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 12 Sep 2018 16:45:42 +0100 Subject: fix logline --- synapse/handlers/sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 23983a51ab..34729e7f8c 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1701,7 +1701,7 @@ class SyncHandler(object): if batch.limited: user_id = sync_result_builder.sync_config.user.to_string() logger.info( - "Incremental syncing room %s for user %s with %d state events" % ( + "Incremental gappy sync of room %s for user %s with %d state events" % ( room_id, user_id, len(state), -- cgit 1.5.1 From 0e200e366d568586e8f7f9dd0beb9b53bef86047 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 12 Sep 2018 16:47:20 +0100 Subject: correctly log gappy sync metrics --- synapse/handlers/sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 34729e7f8c..88ef894d6e 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1698,7 +1698,7 @@ class SyncHandler(object): sync_result_builder.joined.append(room_sync) - if batch.limited: + if batch.limited and since_token: user_id = sync_result_builder.sync_config.user.to_string() logger.info( "Incremental gappy sync of room %s for user %s with %d state events" % ( -- cgit 1.5.1 From 0403cf07839abc0129ed75e785b10330338caf76 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 12 Sep 2018 16:54:28 +0100 Subject: argh pep8 --- synapse/handlers/sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 88ef894d6e..45a1dd5952 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1701,7 +1701,7 @@ class SyncHandler(object): if batch.limited and since_token: user_id = sync_result_builder.sync_config.user.to_string() logger.info( - "Incremental gappy sync of room %s for user %s with 
%d state events" % ( + "Incremental gappy sync of %s for user %s with %d state events" % ( room_id, user_id, len(state), -- cgit 1.5.1 From 2ac1abbc7e8647548ae2e32119cb2bd3b173d9bd Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 12 Sep 2018 17:11:05 +0100 Subject: show heroes if a room has a 'deleted' name/canonical_alias (#3851) --- changelog.d/3851.bugfix | 1 + synapse/handlers/sync.py | 17 +++++++++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 changelog.d/3851.bugfix (limited to 'synapse') diff --git a/changelog.d/3851.bugfix b/changelog.d/3851.bugfix new file mode 100644 index 0000000000..b53a9efe7b --- /dev/null +++ b/changelog.d/3851.bugfix @@ -0,0 +1 @@ +Show heroes if room name/canonical alias has been deleted diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 45a1dd5952..9bca4e7067 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -562,8 +562,21 @@ class SyncHandler(object): details.get(Membership.INVITE, empty_ms).count ) - if name_id or canonical_alias_id: - defer.returnValue(summary) + # if the room has a name or canonical_alias set, we can skip + # calculating heroes. we assume that if the event has contents, it'll + # be a valid name or canonical_alias - i.e. we're checking that they + # haven't been "deleted" by blatting {} over the top. + if name_id: + name = yield self.store.get_event(name_id, allow_none=False) + if name and name.content: + defer.returnValue(summary) + + if canonical_alias_id: + canonical_alias = yield self.store.get_event( + canonical_alias_id, allow_none=False, + ) + if canonical_alias and canonical_alias.content: + defer.returnValue(summary) joined_user_ids = [ r[0] for r in details.get(Membership.JOIN, empty_ms).members -- cgit 1.5.1 From b7d2fb5eb9c55f95048444bb6703abbf2fe14c5c Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 13 Sep 2018 19:59:32 +1000 Subject: Remove some superfluous logging (#3855) --- .circleci/merge_base_branch.sh | 2 +- changelog.d/3855.misc | 1 + synapse/__init__.py | 10 ++++++++++ synapse/config/logger.py | 17 ++++++++++++++++- 4 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 changelog.d/3855.misc (limited to 'synapse') diff --git a/.circleci/merge_base_branch.sh b/.circleci/merge_base_branch.sh index 2d700dbf11..4e297d77da 100755 --- a/.circleci/merge_base_branch.sh +++ b/.circleci/merge_base_branch.sh @@ -4,7 +4,7 @@ set -e # CircleCI doesn't give CIRCLE_PR_NUMBER in the environment for non-forked PRs. Wonderful. # In this case, we just need to do some ~shell magic~ to strip it out of the PULL_REQUEST URL. -echo 'export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}"' >> "$BASH_ENV" +echo 'export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}"' >> $BASH_ENV source $BASH_ENV if [[ -z "${CIRCLE_PR_NUMBER}" ]] diff --git a/changelog.d/3855.misc b/changelog.d/3855.misc new file mode 100644 index 0000000000..a25bb020ba --- /dev/null +++ b/changelog.d/3855.misc @@ -0,0 +1 @@ +Removed some excess logging messages. \ No newline at end of file diff --git a/synapse/__init__.py b/synapse/__init__.py index 65a2b894cc..9dbe0b9f10 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -17,4 +17,14 @@ """ This is a reference implementation of a Matrix home server. 
""" +try: + from twisted.internet import protocol + from twisted.internet.protocol import Factory + from twisted.names.dns import DNSDatagramProtocol + protocol.Factory.noisy = False + Factory.noisy = False + DNSDatagramProtocol.noisy = False +except ImportError: + pass + __version__ = "0.33.4" diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 3f187adfc8..e9a936118d 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -227,7 +227,22 @@ def setup_logging(config, use_worker_options=False): # # However this may not be too much of a problem if we are just writing to a file. observer = STDLibLogObserver() + + def _log(event): + + if "log_text" in event: + if event["log_text"].startswith("DNSDatagramProtocol starting on "): + return + + if event["log_text"].startswith("(UDP Port "): + return + + if event["log_text"].startswith("Timing out client"): + return + + return observer(event) + globalLogBeginner.beginLoggingTo( - [observer], + [_log], redirectStandardIO=not config.no_redirect_stdio, ) -- cgit 1.5.1 From c857f5ef9b77b07179555a63973f11b8aa8c1752 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 13 Sep 2018 12:48:10 +0100 Subject: Make purge history slightly faster Don't pull out events that are outliers and won't be deleted, as nothing should happen to them. --- synapse/storage/events.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 30ff87a4c4..1c8e01a47e 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1930,15 +1930,22 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore should_delete_params = () if not delete_local_events: should_delete_expr += " AND event_id NOT LIKE ?" - should_delete_params += ("%:" + self.hs.hostname, ) + should_delete_params += ( + "%:" + self.hs.hostname, + "%:" + self.hs.hostname, + ) should_delete_params += (room_id, token.topological) + # Note that we insert events that are outliers and aren't going to be + # deleted, as nothing will happen to them. txn.execute( "INSERT INTO events_to_purge" " SELECT event_id, %s" " FROM events AS e LEFT JOIN state_events USING (event_id)" - " WHERE e.room_id = ? AND topological_ordering < ?" % ( + " WHERE (NOT outlier OR (%s)) AND e.room_id = ? AND topological_ordering < ?" + % ( + should_delete_expr, should_delete_expr, ), should_delete_params, -- cgit 1.5.1 From 9dbe38ea7de66f6e5dfab1570dd3a5217a4883eb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 13 Sep 2018 15:05:52 +0100 Subject: Create indices after insertion --- synapse/storage/events.py | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 1c8e01a47e..622f2ababf 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1890,20 +1890,6 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore ")" ) - # create an index on should_delete because later we'll be looking for - # the should_delete / shouldn't_delete subsets - txn.execute( - "CREATE INDEX events_to_purge_should_delete" - " ON events_to_purge(should_delete)", - ) - - # We do joins against events_to_purge for e.g. calculating state - # groups to purge, etc., so lets make an index. 
- txn.execute( - "CREATE INDEX events_to_purge_id" - " ON events_to_purge(event_id)", - ) - # First ensure that we're not about to delete all the forward extremeties txn.execute( "SELECT e.event_id, e.depth FROM events as e " @@ -1950,6 +1936,24 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore ), should_delete_params, ) + + # We create the indices *after* insertion as that's a lot faster. + + # create an index on should_delete because later we'll be looking for + # the should_delete / shouldn't_delete subsets + txn.execute( + "CREATE INDEX events_to_purge_should_delete" + " ON events_to_purge(should_delete)", + ) + + # We do joins against events_to_purge for e.g. calculating state + # groups to purge, etc., so lets make an index. + txn.execute( + "CREATE INDEX events_to_purge_id" + " ON events_to_purge(event_id)", + ) + + txn.execute( "SELECT event_id, should_delete FROM events_to_purge" ) -- cgit 1.5.1 From 9cbd0094f0dcf03676b3234896e73af80cb75021 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 13 Sep 2018 15:15:35 +0100 Subject: pep8 --- synapse/storage/events.py | 1 - 1 file changed, 1 deletion(-) (limited to 'synapse') diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 622f2ababf..bc1c18023b 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1953,7 +1953,6 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore " ON events_to_purge(event_id)", ) - txn.execute( "SELECT event_id, should_delete FROM events_to_purge" ) -- cgit 1.5.1 From bfa0b759e02c74931606ea77f34bec99dfb10589 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 14 Sep 2018 00:15:51 +1000 Subject: Attempt to figure out what's going on with timeouts (#3857) --- changelog.d/3857.misc | 1 + synapse/http/matrixfederationclient.py | 98 +++++++++----------- tests/http/test_fedclient.py | 157 +++++++++++++++++++++++++++++++++ tests/server.py | 42 ++++++++- 4 files changed, 241 insertions(+), 57 deletions(-) create mode 100644 changelog.d/3857.misc create mode 100644 tests/http/test_fedclient.py (limited to 'synapse') diff --git a/changelog.d/3857.misc b/changelog.d/3857.misc new file mode 100644 index 0000000000..e128d193d9 --- /dev/null +++ b/changelog.d/3857.misc @@ -0,0 +1 @@ +Refactor some HTTP timeout code. 
\ No newline at end of file diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index cf920bc041..c49dbacd93 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -26,7 +26,7 @@ from canonicaljson import encode_canonical_json from prometheus_client import Counter from signedjson.sign import sign_json -from twisted.internet import defer, protocol, reactor +from twisted.internet import defer, protocol from twisted.internet.error import DNSLookupError from twisted.web._newclient import ResponseDone from twisted.web.client import Agent, HTTPConnectionPool @@ -40,10 +40,8 @@ from synapse.api.errors import ( HttpResponseException, SynapseError, ) -from synapse.http import cancelled_to_request_timed_out_error from synapse.http.endpoint import matrix_federation_endpoint from synapse.util import logcontext -from synapse.util.async_helpers import add_timeout_to_deferred from synapse.util.logcontext import make_deferred_yieldable logger = logging.getLogger(__name__) @@ -66,13 +64,14 @@ else: class MatrixFederationEndpointFactory(object): def __init__(self, hs): + self.reactor = hs.get_reactor() self.tls_client_options_factory = hs.tls_client_options_factory def endpointForURI(self, uri): destination = uri.netloc.decode('ascii') return matrix_federation_endpoint( - reactor, destination, timeout=10, + self.reactor, destination, timeout=10, tls_client_options_factory=self.tls_client_options_factory ) @@ -90,6 +89,7 @@ class MatrixFederationHttpClient(object): self.hs = hs self.signing_key = hs.config.signing_key[0] self.server_name = hs.hostname + reactor = hs.get_reactor() pool = HTTPConnectionPool(reactor) pool.maxPersistentPerHost = 5 pool.cachedConnectionTimeout = 2 * 60 @@ -100,6 +100,7 @@ class MatrixFederationHttpClient(object): self._store = hs.get_datastore() self.version_string = hs.version_string.encode('ascii') self._next_id = 1 + self.default_timeout = 60 def _create_url(self, destination, path_bytes, param_bytes, query_bytes): return urllib.parse.urlunparse( @@ -143,6 +144,11 @@ class MatrixFederationHttpClient(object): (May also fail with plenty of other Exceptions for things like DNS failures, connection failures, SSL failures.) """ + if timeout: + _sec_timeout = timeout / 1000 + else: + _sec_timeout = self.default_timeout + if ( self.hs.config.federation_domain_whitelist is not None and destination not in self.hs.config.federation_domain_whitelist @@ -215,13 +221,9 @@ class MatrixFederationHttpClient(object): headers=Headers(headers_dict), data=data, agent=self.agent, + reactor=self.hs.get_reactor() ) - add_timeout_to_deferred( - request_deferred, - timeout / 1000. if timeout else 60, - self.hs.get_reactor(), - cancelled_to_request_timed_out_error, - ) + request_deferred.addTimeout(_sec_timeout, self.hs.get_reactor()) response = yield make_deferred_yieldable( request_deferred, ) @@ -261,6 +263,13 @@ class MatrixFederationHttpClient(object): delay = min(delay, 2) delay *= random.uniform(0.8, 1.4) + logger.debug( + "{%s} Waiting %s before sending to %s...", + txn_id, + delay, + destination + ) + yield self.clock.sleep(delay) retries_left -= 1 else: @@ -279,10 +288,9 @@ class MatrixFederationHttpClient(object): # :'( # Update transactions table? 
with logcontext.PreserveLoggingContext(): - body = yield self._timeout_deferred( - treq.content(response), - timeout, - ) + d = treq.content(response) + d.addTimeout(_sec_timeout, self.hs.get_reactor()) + body = yield make_deferred_yieldable(d) raise HttpResponseException( response.code, response.phrase, body ) @@ -396,10 +404,9 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield self._timeout_deferred( - treq.json_content(response), - timeout, - ) + d = treq.json_content(response) + d.addTimeout(self.default_timeout, self.hs.get_reactor()) + body = yield make_deferred_yieldable(d) defer.returnValue(body) @defer.inlineCallbacks @@ -449,10 +456,14 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield self._timeout_deferred( - treq.json_content(response), - timeout, - ) + d = treq.json_content(response) + if timeout: + _sec_timeout = timeout / 1000 + else: + _sec_timeout = self.default_timeout + + d.addTimeout(_sec_timeout, self.hs.get_reactor()) + body = yield make_deferred_yieldable(d) defer.returnValue(body) @@ -504,10 +515,9 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield self._timeout_deferred( - treq.json_content(response), - timeout, - ) + d = treq.json_content(response) + d.addTimeout(self.default_timeout, self.hs.get_reactor()) + body = yield make_deferred_yieldable(d) defer.returnValue(body) @@ -554,10 +564,9 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield self._timeout_deferred( - treq.json_content(response), - timeout, - ) + d = treq.json_content(response) + d.addTimeout(self.default_timeout, self.hs.get_reactor()) + body = yield make_deferred_yieldable(d) defer.returnValue(body) @@ -599,38 +608,15 @@ class MatrixFederationHttpClient(object): try: with logcontext.PreserveLoggingContext(): - length = yield self._timeout_deferred( - _readBodyToFile( - response, output_stream, max_size - ), - ) + d = _readBodyToFile(response, output_stream, max_size) + d.addTimeout(self.default_timeout, self.hs.get_reactor()) + length = yield make_deferred_yieldable(d) except Exception: logger.exception("Failed to download body") raise defer.returnValue((length, headers)) - def _timeout_deferred(self, deferred, timeout_ms=None): - """Times the deferred out after `timeout_ms` ms - - Args: - deferred (Deferred) - timeout_ms (int|None): Timeout in milliseconds. If None defaults - to 60 seconds. - - Returns: - Deferred - """ - - add_timeout_to_deferred( - deferred, - timeout_ms / 1000. if timeout_ms else 60, - self.hs.get_reactor(), - cancelled_to_request_timed_out_error, - ) - - return deferred - class _ReadBodyToFileProtocol(protocol.Protocol): def __init__(self, stream, deferred, max_size): diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py new file mode 100644 index 0000000000..1c46c9cfeb --- /dev/null +++ b/tests/http/test_fedclient.py @@ -0,0 +1,157 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from mock import Mock + +from twisted.internet.defer import TimeoutError +from twisted.internet.error import ConnectingCancelledError, DNSLookupError +from twisted.web.client import ResponseNeverReceived + +from synapse.http.matrixfederationclient import MatrixFederationHttpClient + +from tests.unittest import HomeserverTestCase + + +class FederationClientTests(HomeserverTestCase): + def make_homeserver(self, reactor, clock): + + hs = self.setup_test_homeserver(reactor=reactor, clock=clock) + hs.tls_client_options_factory = None + return hs + + def prepare(self, reactor, clock, homeserver): + + self.cl = MatrixFederationHttpClient(self.hs) + self.reactor.lookups["testserv"] = "1.2.3.4" + + def test_dns_error(self): + """ + If the DNS raising returns an error, it will bubble up. + """ + d = self.cl._request("testserv2:8008", "GET", "foo/bar", timeout=10000) + self.pump() + + f = self.failureResultOf(d) + self.assertIsInstance(f.value, DNSLookupError) + + def test_client_never_connect(self): + """ + If the HTTP request is not connected and is timed out, it'll give a + ConnectingCancelledError. + """ + d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000) + + self.pump() + + # Nothing happened yet + self.assertFalse(d.called) + + # Make sure treq is trying to connect + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + self.assertEqual(clients[0][0], '1.2.3.4') + self.assertEqual(clients[0][1], 8008) + + # Deferred is still without a result + self.assertFalse(d.called) + + # Push by enough to time it out + self.reactor.advance(10.5) + f = self.failureResultOf(d) + + self.assertIsInstance(f.value, ConnectingCancelledError) + + def test_client_connect_no_response(self): + """ + If the HTTP request is connected, but gets no response before being + timed out, it'll give a ResponseNeverReceived. + """ + d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000) + + self.pump() + + # Nothing happened yet + self.assertFalse(d.called) + + # Make sure treq is trying to connect + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + self.assertEqual(clients[0][0], '1.2.3.4') + self.assertEqual(clients[0][1], 8008) + + conn = Mock() + client = clients[0][2].buildProtocol(None) + client.makeConnection(conn) + + # Deferred is still without a result + self.assertFalse(d.called) + + # Push by enough to time it out + self.reactor.advance(10.5) + f = self.failureResultOf(d) + + self.assertIsInstance(f.value, ResponseNeverReceived) + + def test_client_gets_headers(self): + """ + Once the client gets the headers, _request returns successfully. 
+ """ + d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000) + + self.pump() + + conn = Mock() + clients = self.reactor.tcpClients + client = clients[0][2].buildProtocol(None) + client.makeConnection(conn) + + # Deferred does not have a result + self.assertFalse(d.called) + + # Send it the HTTP response + client.dataReceived(b"HTTP/1.1 200 OK\r\nServer: Fake\r\n\r\n") + + # We should get a successful response + r = self.successResultOf(d) + self.assertEqual(r.code, 200) + + def test_client_headers_no_body(self): + """ + If the HTTP request is connected, but gets no response before being + timed out, it'll give a ResponseNeverReceived. + """ + d = self.cl.post_json("testserv:8008", "foo/bar", timeout=10000) + + self.pump() + + conn = Mock() + clients = self.reactor.tcpClients + client = clients[0][2].buildProtocol(None) + client.makeConnection(conn) + + # Deferred does not have a result + self.assertFalse(d.called) + + # Send it the HTTP response + client.dataReceived( + (b"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n" + b"Server: Fake\r\n\r\n") + ) + + # Push by enough to time it out + self.reactor.advance(10.5) + f = self.failureResultOf(d) + + self.assertIsInstance(f.value, TimeoutError) diff --git a/tests/server.py b/tests/server.py index a2c3ca61f6..420ec4e088 100644 --- a/tests/server.py +++ b/tests/server.py @@ -4,9 +4,14 @@ from io import BytesIO from six import text_type import attr +from zope.interface import implementer -from twisted.internet import address, threads +from twisted.internet import address, threads, udp +from twisted.internet._resolver import HostResolution +from twisted.internet.address import IPv4Address from twisted.internet.defer import Deferred +from twisted.internet.error import DNSLookupError +from twisted.internet.interfaces import IReactorPluggableNameResolver from twisted.python.failure import Failure from twisted.test.proto_helpers import MemoryReactorClock @@ -154,11 +159,46 @@ def render(request, resource, clock): wait_until_result(clock, request) +@implementer(IReactorPluggableNameResolver) class ThreadedMemoryReactorClock(MemoryReactorClock): """ A MemoryReactorClock that supports callFromThread. """ + def __init__(self): + self._udp = [] + self.lookups = {} + + class Resolver(object): + def resolveHostName( + _self, + resolutionReceiver, + hostName, + portNumber=0, + addressTypes=None, + transportSemantics='TCP', + ): + + resolution = HostResolution(hostName) + resolutionReceiver.resolutionBegan(resolution) + if hostName not in self.lookups: + raise DNSLookupError("OH NO") + + resolutionReceiver.addressResolved( + IPv4Address('TCP', self.lookups[hostName], portNumber) + ) + resolutionReceiver.resolutionComplete() + return resolution + + self.nameResolver = Resolver() + super(ThreadedMemoryReactorClock, self).__init__() + + def listenUDP(self, port, protocol, interface='', maxPacketSize=8196): + p = udp.Port(port, protocol, interface, maxPacketSize, self) + p.startListening() + self._udp.append(p) + return p + def callFromThread(self, callback, *args, **kwargs): """ Make the callback fire in the next reactor iteration. -- cgit 1.5.1 From 89a76d18898a0172010c25e5a7c66d57c16b9196 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 13 Sep 2018 15:33:16 +0100 Subject: Fix handling of redacted events from federation If we receive an event that doesn't pass their content hash check (e.g. 
due to already being redacted) then we hit a bug which causes an exception to be raised, which then promptly stops the event (and request) from being processed. This affects all sorts of federation APIs, including joining rooms with a redacted state event. --- synapse/events/__init__.py | 5 +++++ synapse/federation/federation_base.py | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 51f9084b90..b782af6308 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import six + from synapse.util.caches import intern_dict from synapse.util.frozenutils import freeze @@ -147,6 +149,9 @@ class EventBase(object): def items(self): return list(self._event_dict.items()) + def keys(self): + return six.iterkeys(self._event_dict) + class FrozenEvent(EventBase): def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None): diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 61782ae1c0..b7ad729c63 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -153,7 +153,7 @@ class FederationBase(object): # *actual* redacted copy to be on the safe side.) redacted_event = prune_event(pdu) if ( - set(six.iterkeys(redacted_event)) == set(six.iterkeys(pdu)) and + set(redacted_event.keys()) == set(pdu.keys()) and set(six.iterkeys(redacted_event.content)) == set(six.iterkeys(pdu.content)) ): -- cgit 1.5.1 From ed5331a6272280efd4fc98ae425711acec8c6be5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 13 Sep 2018 16:10:56 +0100 Subject: comment --- synapse/storage/events.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'synapse') diff --git a/synapse/storage/events.py b/synapse/storage/events.py index bc1c18023b..e7487311ce 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1916,6 +1916,8 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore should_delete_params = () if not delete_local_events: should_delete_expr += " AND event_id NOT LIKE ?"
+ + # We include the parameter twice since we use the expression twice should_delete_params += ( "%:" + self.hs.hostname, "%:" + self.hs.hostname, -- cgit 1.5.1 From bc74925c5b94f0c02603297d4ccb7e05008d5124 Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 13 Sep 2018 17:02:59 +0100 Subject: WIP e2e key backups Continues from uhoreg's branch This just fixed the errcode on /room_keys/version if no backup and updates the schema delta to be on the latest so it gets run --- synapse/rest/client/v2_alpha/room_keys.py | 14 ++++++--- synapse/storage/schema/delta/46/e2e_room_keys.sql | 38 ----------------------- synapse/storage/schema/delta/51/e2e_room_keys.sql | 38 +++++++++++++++++++++++ 3 files changed, 48 insertions(+), 42 deletions(-) delete mode 100644 synapse/storage/schema/delta/46/e2e_room_keys.sql create mode 100644 synapse/storage/schema/delta/51/e2e_room_keys.sql (limited to 'synapse') diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 1ed18e986f..ea114bc8b4 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -17,7 +17,7 @@ import logging from twisted.internet import defer -from synapse.api.errors import SynapseError +from synapse.api.errors import SynapseError, Codes from synapse.http.servlet import ( RestServlet, parse_json_object_from_request, @@ -324,9 +324,15 @@ class RoomKeysVersionServlet(RestServlet): requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() - info = yield self.e2e_room_keys_handler.get_version_info( - user_id, version - ) + try: + info = yield self.e2e_room_keys_handler.get_version_info( + user_id, version + ) + except SynapseError as e: + if e.code == 404: + e.errcode = Codes.NOT_FOUND + e.msg = "No backup found" + raise e defer.returnValue((200, info)) @defer.inlineCallbacks diff --git a/synapse/storage/schema/delta/46/e2e_room_keys.sql b/synapse/storage/schema/delta/46/e2e_room_keys.sql deleted file mode 100644 index 4531fd56ee..0000000000 --- a/synapse/storage/schema/delta/46/e2e_room_keys.sql +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright 2017 New Vector Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - --- users' optionally backed up encrypted e2e sessions -CREATE TABLE e2e_room_keys ( - user_id TEXT NOT NULL, - room_id TEXT NOT NULL, - session_id TEXT NOT NULL, - version TEXT NOT NULL, - first_message_index INT, - forwarded_count INT, - is_verified BOOLEAN, - session_data TEXT NOT NULL -); - -CREATE UNIQUE INDEX e2e_room_keys_idx ON e2e_room_keys(user_id, room_id, session_id); - --- the metadata for each generation of encrypted e2e session backups -CREATE TABLE e2e_room_keys_versions ( - user_id TEXT NOT NULL, - version TEXT NOT NULL, - algorithm TEXT NOT NULL, - auth_data TEXT NOT NULL -); - -CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions(user_id, version); diff --git a/synapse/storage/schema/delta/51/e2e_room_keys.sql b/synapse/storage/schema/delta/51/e2e_room_keys.sql new file mode 100644 index 0000000000..4531fd56ee --- /dev/null +++ b/synapse/storage/schema/delta/51/e2e_room_keys.sql @@ -0,0 +1,38 @@ +/* Copyright 2017 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- users' optionally backed up encrypted e2e sessions +CREATE TABLE e2e_room_keys ( + user_id TEXT NOT NULL, + room_id TEXT NOT NULL, + session_id TEXT NOT NULL, + version TEXT NOT NULL, + first_message_index INT, + forwarded_count INT, + is_verified BOOLEAN, + session_data TEXT NOT NULL +); + +CREATE UNIQUE INDEX e2e_room_keys_idx ON e2e_room_keys(user_id, room_id, session_id); + +-- the metadata for each generation of encrypted e2e session backups +CREATE TABLE e2e_room_keys_versions ( + user_id TEXT NOT NULL, + version TEXT NOT NULL, + algorithm TEXT NOT NULL, + auth_data TEXT NOT NULL +); + +CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions(user_id, version); -- cgit 1.5.1 From 1c3f4d9ca53b3e3a5ef1849a8da68c6d8cf82938 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 14 Sep 2018 03:09:13 +1000 Subject: buffer? 
--- synapse/http/matrixfederationclient.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index c49dbacd93..cdbe27eb37 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -221,7 +221,8 @@ class MatrixFederationHttpClient(object): headers=Headers(headers_dict), data=data, agent=self.agent, - reactor=self.hs.get_reactor() + reactor=self.hs.get_reactor(), + unbuffered=True ) request_deferred.addTimeout(_sec_timeout, self.hs.get_reactor()) response = yield make_deferred_yieldable( -- cgit 1.5.1 From 7c27c4d51cfd2a3d6504889b1ccdddabf38ae05c Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 14 Sep 2018 03:11:11 +1000 Subject: merge (#3576) --- .circleci/config.yml | 17 +++++++---- .travis.yml | 3 ++ changelog.d/3576.feature | 1 + synapse/http/client.py | 14 ++++++--- synapse/push/httppusher.py | 7 ++++- synapse/push/mailer.py | 7 +++-- synapse/replication/slave/storage/devices.py | 23 ++++++++++----- tox.ini | 44 ++-------------------------- 8 files changed, 55 insertions(+), 61 deletions(-) create mode 100644 changelog.d/3576.feature (limited to 'synapse') diff --git a/.circleci/config.yml b/.circleci/config.yml index 5266544f3c..605430fb3f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -53,7 +53,7 @@ jobs: steps: - checkout - run: docker pull matrixdotorg/sytest-synapsepy3 - - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs hawkowl/sytestpy3 + - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3 - store_artifacts: path: ~/project/logs destination: logs @@ -76,7 +76,7 @@ jobs: - checkout - run: bash .circleci/merge_base_branch.sh - run: docker pull matrixdotorg/sytest-synapsepy3 - - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs hawkowl/sytestpy3 + - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3 - store_artifacts: path: ~/project/logs destination: logs @@ -101,6 +101,8 @@ workflows: jobs: - sytestpy2 - sytestpy2postgres + - sytestpy3 + - sytestpy3postgres - sytestpy2merged: filters: branches: @@ -109,6 +111,11 @@ workflows: filters: branches: ignore: /develop|master/ -# Currently broken while the Python 3 port is incomplete -# - sytestpy3 -# - sytestpy3postgres + - sytestpy3merged: + filters: + branches: + ignore: /develop|master/ + - sytestpy3postgresmerged: + filters: + branches: + ignore: /develop|master/ diff --git a/.travis.yml b/.travis.yml index ebc972ed24..b3ee66da8f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -25,6 +25,9 @@ matrix: services: - postgresql + - python: 3.5 + env: TOX_ENV=py35 + - python: 3.6 env: TOX_ENV=py36 diff --git a/changelog.d/3576.feature b/changelog.d/3576.feature new file mode 100644 index 0000000000..02a10e370d --- /dev/null +++ b/changelog.d/3576.feature @@ -0,0 +1 @@ +Python 3.5+ is now supported. 
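The client.py hunk that follows deals with a Python 3 subtlety: Twisted returns raw header names and values as byte strings. A hedged standalone sketch (not part of the patch; the values are illustrative) of why the lookups switch to bytes keys:

from twisted.web.http_headers import Headers

# getAllRawHeaders() yields (name, values) pairs where, on Python 3, both
# names and values are bytes and each name maps to a sequence of values,
# hence the bytes key, the [0] index, and the int() conversion below.
headers = Headers({b"Content-Length": [b"42"]})
resp_headers = dict(headers.getAllRawHeaders())

max_size = 10
if b"Content-Length" in resp_headers and int(resp_headers[b"Content-Length"][0]) > max_size:
    print("response body larger than %d bytes" % max_size)
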
diff --git a/synapse/http/client.py b/synapse/http/client.py index 4ba54fed05..d60f87b04e 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -348,7 +348,8 @@ class SimpleHttpClient(object): resp_headers = dict(response.headers.getAllRawHeaders()) - if 'Content-Length' in resp_headers and resp_headers['Content-Length'] > max_size: + if (b'Content-Length' in resp_headers and + int(resp_headers[b'Content-Length']) > max_size): logger.warn("Requested URL is too large > %r bytes" % (self.max_size,)) raise SynapseError( 502, @@ -381,7 +382,12 @@ class SimpleHttpClient(object): ) defer.returnValue( - (length, resp_headers, response.request.absoluteURI, response.code), + ( + length, + resp_headers, + response.request.absoluteURI.decode('ascii'), + response.code, + ), ) @@ -466,9 +472,9 @@ class SpiderEndpointFactory(object): def endpointForURI(self, uri): logger.info("Getting endpoint for %s", uri.toBytes()) - if uri.scheme == "http": + if uri.scheme == b"http": endpoint_factory = HostnameEndpoint - elif uri.scheme == "https": + elif uri.scheme == b"https": tlsCreator = self.policyForHTTPS.creatorForNetloc(uri.host, uri.port) def endpoint_factory(reactor, host, port, **kw): diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 81e18bcf7d..48abd5e4d6 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -15,6 +15,8 @@ # limitations under the License. import logging +import six + from prometheus_client import Counter from twisted.internet import defer @@ -26,6 +28,9 @@ from synapse.util.metrics import Measure from . import push_rule_evaluator, push_tools +if six.PY3: + long = int + logger = logging.getLogger(__name__) http_push_processed_counter = Counter("synapse_http_httppusher_http_pushes_processed", "") @@ -96,7 +101,7 @@ class HttpPusher(object): @defer.inlineCallbacks def on_new_notifications(self, min_stream_ordering, max_stream_ordering): - self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering) + self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering or 0) yield self._process() @defer.inlineCallbacks diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index bfa6df7b68..b78ce10396 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -17,10 +17,11 @@ import email.mime.multipart import email.utils import logging import time -import urllib from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText +from six.moves import urllib + import bleach import jinja2 @@ -474,7 +475,7 @@ class Mailer(object): # XXX: make r0 once API is stable return "%s_matrix/client/unstable/pushers/remove?%s" % ( self.hs.config.public_baseurl, - urllib.urlencode(params), + urllib.parse.urlencode(params), ) @@ -561,7 +562,7 @@ def _create_mxc_to_http_filter(config): return "%s_matrix/media/v1/thumbnail/%s?%s%s" % ( config.public_baseurl, serverAndMediaId, - urllib.urlencode(params), + urllib.parse.urlencode(params), fragment or "", ) diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py index 8206a988f7..21b8c468fa 100644 --- a/synapse/replication/slave/storage/devices.py +++ b/synapse/replication/slave/storage/devices.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import six + from synapse.storage import DataStore from synapse.storage.end_to_end_keys import EndToEndKeyStore from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -21,6 +23,13 @@ from ._base import BaseSlavedStore from ._slaved_id_tracker import SlavedIdTracker +def __func__(inp): + if six.PY3: + return inp + else: + return inp.__func__ + + class SlavedDeviceStore(BaseSlavedStore): def __init__(self, db_conn, hs): super(SlavedDeviceStore, self).__init__(db_conn, hs) @@ -38,14 +47,14 @@ class SlavedDeviceStore(BaseSlavedStore): "DeviceListFederationStreamChangeCache", device_list_max, ) - get_device_stream_token = DataStore.get_device_stream_token.__func__ - get_user_whose_devices_changed = DataStore.get_user_whose_devices_changed.__func__ - get_devices_by_remote = DataStore.get_devices_by_remote.__func__ - _get_devices_by_remote_txn = DataStore._get_devices_by_remote_txn.__func__ - _get_e2e_device_keys_txn = DataStore._get_e2e_device_keys_txn.__func__ - mark_as_sent_devices_by_remote = DataStore.mark_as_sent_devices_by_remote.__func__ + get_device_stream_token = __func__(DataStore.get_device_stream_token) + get_user_whose_devices_changed = __func__(DataStore.get_user_whose_devices_changed) + get_devices_by_remote = __func__(DataStore.get_devices_by_remote) + _get_devices_by_remote_txn = __func__(DataStore._get_devices_by_remote_txn) + _get_e2e_device_keys_txn = __func__(DataStore._get_e2e_device_keys_txn) + mark_as_sent_devices_by_remote = __func__(DataStore.mark_as_sent_devices_by_remote) _mark_as_sent_devices_by_remote_txn = ( - DataStore._mark_as_sent_devices_by_remote_txn.__func__ + __func__(DataStore._mark_as_sent_devices_by_remote_txn) ) count_e2e_one_time_keys = EndToEndKeyStore.__dict__["count_e2e_one_time_keys"] diff --git a/tox.ini b/tox.ini index 085f438989..80ac9324df 100644 --- a/tox.ini +++ b/tox.ini @@ -64,49 +64,11 @@ setenv = {[base]setenv} SYNAPSE_POSTGRES = 1 +[testenv:py35] +usedevelop=true + [testenv:py36] usedevelop=true -commands = - /usr/bin/find "{toxinidir}" -name '*.pyc' -delete - coverage run {env:COVERAGE_OPTS:} --source="{toxinidir}/synapse" \ - "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests/config \ - tests/api/test_filtering.py \ - tests/api/test_ratelimiting.py \ - tests/appservice \ - tests/crypto \ - tests/events \ - tests/handlers/test_appservice.py \ - tests/handlers/test_auth.py \ - tests/handlers/test_device.py \ - tests/handlers/test_directory.py \ - tests/handlers/test_e2e_keys.py \ - tests/handlers/test_presence.py \ - tests/handlers/test_profile.py \ - tests/handlers/test_register.py \ - tests/replication/slave/storage/test_account_data.py \ - tests/replication/slave/storage/test_receipts.py \ - tests/storage/test_appservice.py \ - tests/storage/test_background_update.py \ - tests/storage/test_base.py \ - tests/storage/test__base.py \ - tests/storage/test_client_ips.py \ - tests/storage/test_devices.py \ - tests/storage/test_end_to_end_keys.py \ - tests/storage/test_event_push_actions.py \ - tests/storage/test_keys.py \ - tests/storage/test_presence.py \ - tests/storage/test_profile.py \ - tests/storage/test_registration.py \ - tests/storage/test_room.py \ - tests/storage/test_user_directory.py \ - tests/test_distributor.py \ - tests/test_dns.py \ - tests/test_preview.py \ - tests/test_test_utils.py \ - tests/test_types.py \ - tests/util} \ - {env:TOXSUFFIX:} - {env:DUMP_COVERAGE_COMMAND:coverage report -m} [testenv:packaging] deps = -- cgit 1.5.1 From 63755fa4c25f120941aba24d6192815313d12ea7 Mon Sep 17 00:00:00 2001 
From: Amber Brown Date: Fri, 14 Sep 2018 03:21:47 +1000 Subject: we do that higher up --- synapse/http/matrixfederationclient.py | 1 + 1 file changed, 1 insertion(+) (limited to 'synapse') diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index cdbe27eb37..d1a6e87706 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -91,6 +91,7 @@ class MatrixFederationHttpClient(object): self.server_name = hs.hostname reactor = hs.get_reactor() pool = HTTPConnectionPool(reactor) + pool.retryAutomatically = False pool.maxPersistentPerHost = 5 pool.cachedConnectionTimeout = 2 * 60 self.agent = Agent.usingEndpointFactory( -- cgit 1.5.1 From 7c33ab76da91a747fd442f65c9bfe187a81864f7 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 14 Sep 2018 03:45:34 +1000 Subject: redact better --- synapse/http/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py index 58ef8d3ce4..a3f9e4f67c 100644 --- a/synapse/http/__init__.py +++ b/synapse/http/__init__.py @@ -38,12 +38,12 @@ def cancelled_to_request_timed_out_error(value, timeout): return value -ACCESS_TOKEN_RE = re.compile(br'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$') +ACCESS_TOKEN_RE = re.compile(r'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$') def redact_uri(uri): """Strips access tokens from the uri replaces with <redacted> """ return ACCESS_TOKEN_RE.sub( - br'\1<redacted>\3', + r'\1<redacted>\3', uri ) -- cgit 1.5.1 From f1a72646636c08e5e0f6dc6f583d0bffebf94c85 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Thu, 13 Sep 2018 11:51:12 -0600 Subject: Fix minor typo in exception --- synapse/replication/tcp/streams.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/replication/tcp/streams.py b/synapse/replication/tcp/streams.py index 55fe701c5c..c1e626be3f 100644 --- a/synapse/replication/tcp/streams.py +++ b/synapse/replication/tcp/streams.py @@ -196,7 +196,7 @@ class Stream(object): ) if len(rows) >= MAX_EVENTS_BEHIND: - raise Exception("stream %s has fallen behined" % (self.NAME)) + raise Exception("stream %s has fallen behind" % (self.NAME)) else: rows = yield self.update_function( from_token, current_token, -- cgit 1.5.1 From 8f08d848f53bc79cdd47f8d4025313268a16b227 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 14 Sep 2018 03:53:56 +1000 Subject: fix --- synapse/http/site.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/http/site.py b/synapse/http/site.py index f0828c6542..18f2d37e00 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -85,7 +85,7 @@ class SynapseRequest(Request): return "%s-%i" % (self.method, self.request_seq) def get_redacted_uri(self): - return redact_uri(self.uri) + return redact_uri(self.uri.decode('ascii')) def get_user_agent(self): return self.requestHeaders.getRawHeaders(b"User-Agent", [None])[-1] -- cgit 1.5.1 From c971aa7b9d5b4d3900404a285f34dd5004a76f37 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 14 Sep 2018 03:57:02 +1000 Subject: fix --- synapse/http/site.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/http/site.py b/synapse/http/site.py index 18f2d37e00..e1e53e8ae5 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -85,7 +85,10 @@ class SynapseRequest(Request): return "%s-%i" % (self.method, self.request_seq) def get_redacted_uri(self): - return
redact_uri(self.uri.decode('ascii')) + uri = self.uri + if isinstance(uri, bytes): + uri = self.uri.decode('ascii') + return redact_uri(uri) def get_user_agent(self): return self.requestHeaders.getRawHeaders(b"User-Agent", [None])[-1] -- cgit 1.5.1 From 7de1989ea2809c13b67be6c4990b269d817fae44 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 13 Sep 2018 22:43:50 +0100 Subject: fix link for case that config.email_riot_base_url is set --- synapse/push/mailer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index b78ce10396..1a5a10d974 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -441,7 +441,7 @@ class Mailer(object): def make_room_link(self, room_id): if self.hs.config.email_riot_base_url: - base_url = self.hs.config.email_riot_base_url + base_url = "%s/#/room" % (self.hs.config.email_riot_base_url) elif self.app_name == "Vector": # need /beta for Universal Links to work on iOS base_url = "https://vector.im/beta/#/room" -- cgit 1.5.1 From 0a81038ea0e308d2944b7deae43060e4982b0abe Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Sep 2018 14:39:59 +0100 Subject: Add in flight real time metrics for Measure blocks --- synapse/metrics/__init__.py | 110 +++++++++++++++++++++++++++++++++++++++++++- synapse/util/metrics.py | 22 +++++++++ 2 files changed, 131 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 550f8443f7..98333b2048 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -18,8 +18,11 @@ import gc import logging import os import platform +import threading import time +import six + import attr from prometheus_client import Counter, Gauge, Histogram from prometheus_client.core import REGISTRY, GaugeMetricFamily @@ -68,7 +71,7 @@ class LaterGauge(object): return if isinstance(calls, dict): - for k, v in calls.items(): + for k, v in six.iteritems(calls): g.add_metric(k, v) else: g.add_metric([], calls) @@ -87,6 +90,111 @@ class LaterGauge(object): all_gauges[self.name] = self +class InFlightGauge(object): + """Tracks number of things (e.g. requests, Measure blocks, etc) in flight + at any given time. + + Each InFlightGauge will create a metric called `<name>_total` that counts + the number of in flight blocks, as well as a metrics for each item in the + given `sub_metrics` as `<name>_<sub_metric>` which will get updated by the + callbacks. + + Args: + name (str) + desc (str) + labels (list[str]) + sub_metrics (list[str]): A list of sub metrics that the callbacks + will update. + """ + + # TODO: Expand this to + + def __init__(self, name, desc, labels, sub_metrics): + self.name = name + self.desc = desc + self.labels = labels + self.sub_metrics = sub_metrics + + # Create a class which have the sub_metrics values as attributes, which + # default to 0 on initialization. Used to pass to registered callbacks. + self._metrics_class = attr.make_class( + "_MetricsEntry", + attrs={x: attr.ib(0) for x in sub_metrics}, + slots=True, + ) + + # Counts number of in flight blocks for a given set of label values + self._registrations = {} + + # Protects access to _registrations + self._lock = threading.Lock() + + self._register_with_collector() + + def register(self, key, callback): + """Registers that we've entered a new block with labels `key`. + + `callback` gets called each time the metrics are collected. The same + value must also be given to `unregister`.
+ + `callback` gets called with an object that has an attribute per + sub_metric, which should be updated with the necessary values. Note that + the metrics object is shared between all callbacks registered with the + same key. + + Note that `callback` may be called on a separate thread. + """ + with self._lock: + self._registrations.setdefault(key, set()).add(callback) + + def unregister(self, key, callback): + """Registers that we've exited a block with labels `key`. + """ + + with self._lock: + self._registrations.setdefault(key, set()).discard(callback) + + def collect(self): + """Called by prometheus client when it reads metrics. + + Note: may be called by a separate thread. + """ + in_flight = GaugeMetricFamily(self.name + "_total", self.desc, labels=self.labels) + + metrics_by_key = {} + + # We copy so that we don't mutate the list while iterating + with self._lock: + keys = list(self._registrations) + + for key in keys: + with self._lock: + callbacks = set(self._registrations[key]) + + in_flight.add_metric(key, len(callbacks)) + + metrics = self._metrics_class() + metrics_by_key[key] = metrics + for callback in callbacks: + callback(metrics) + + yield in_flight + + for name in self.sub_metrics: + gauge = GaugeMetricFamily("_".join([self.name, name]), "", labels=self.labels) + for key, metrics in six.iteritems(metrics_by_key): + gauge.add_metric(key, getattr(metrics, name)) + yield gauge + + def _register_with_collector(self): + if self.name in all_gauges.keys(): + logger.warning("%s already registered, reregistering" % (self.name,)) + REGISTRY.unregister(all_gauges.pop(self.name)) + + REGISTRY.register(self) + all_gauges[self.name] = self + + # # Detailed CPU metrics # diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 97f1267380..4b4ac5f6c7 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -20,6 +20,7 @@ from prometheus_client import Counter from twisted.internet import defer +from synapse.metrics import InFlightGauge from synapse.util.logcontext import LoggingContext logger = logging.getLogger(__name__) @@ -45,6 +46,13 @@ block_db_txn_duration = Counter( block_db_sched_duration = Counter( "synapse_util_metrics_block_db_sched_duration_seconds", "", ["block_name"]) +# Tracks the number of blocks currently active +in_flight = InFlightGauge( + "synapse_util_metrics_block_in_flight", "", + labels=["block_name"], + sub_metrics=["real_time_max", "real_time_sum"], +) + def measure_func(name): def wrapper(func): @@ -82,10 +90,14 @@ class Measure(object): self.start_usage = self.start_context.get_resource_usage() + in_flight.register((self.name,), self._update_in_flight) + def __exit__(self, exc_type, exc_val, exc_tb): if isinstance(exc_type, Exception) or not self.start_context: return + in_flight.unregister((self.name,), self._update_in_flight) + duration = self.clock.time() - self.start block_counter.labels(self.name).inc() @@ -120,3 +132,13 @@ class Measure(object): if self.created_context: self.start_context.__exit__(exc_type, exc_val, exc_tb) + + def _update_in_flight(self, metrics): + """Gets called when processing in flight metrics + """ + duration = self.clock.time() - self.start + + metrics.real_time_max = max(metrics.real_time_max, duration) + metrics.real_time_sum += duration + + # TODO: Add other in flight metrics. 
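To make the register/unregister callback protocol above concrete, here is a hypothetical usage sketch (not part of the patch; it assumes the InFlightGauge class added in this commit is in scope, and it mirrors what the Measure changes do):

import time

# One gauge, keyed by a single "block_name" label, tracking two sub metrics.
gauge = InFlightGauge(
    "demo_block_in_flight", "",
    labels=["block_name"],
    sub_metrics=["real_time_max", "real_time_sum"],
)

start = time.time()

def report(metrics):
    # Called at collection time with the shared per-key metrics object; its
    # attributes default to 0 (see the attr.make_class call above).
    elapsed = time.time() - start
    metrics.real_time_max = max(metrics.real_time_max, elapsed)
    metrics.real_time_sum += elapsed

gauge.register(("demo",), report)    # entering the block
# ... work happens here; any scrape in this window sees the block in flight
gauge.unregister(("demo",), report)  # leaving the block
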
-- cgit 1.5.1 From 9e2f9a7b57a8c4d2238684a9ccd6145948229e2c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Sep 2018 15:11:26 +0100 Subject: Measure outbound requests --- synapse/http/matrixfederationclient.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index c49dbacd93..c3542b9353 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -43,6 +43,7 @@ from synapse.api.errors import ( from synapse.http.endpoint import matrix_federation_endpoint from synapse.util import logcontext from synapse.util.logcontext import make_deferred_yieldable +from synapse.util.metrics import Measure logger = logging.getLogger(__name__) outbound_logger = logging.getLogger("synapse.http.outbound") @@ -224,9 +225,11 @@ class MatrixFederationHttpClient(object): reactor=self.hs.get_reactor() ) request_deferred.addTimeout(_sec_timeout, self.hs.get_reactor()) - response = yield make_deferred_yieldable( - request_deferred, - ) + + with Measure(self.clock, "outbound_request"): + response = yield make_deferred_yieldable( + request_deferred, + ) log_result = "%d %s" % (response.code, response.phrase,) break -- cgit 1.5.1 From d0f6c1ce2189693db40a385f9ff19bdbbc5c87cf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Sep 2018 15:12:36 +0100 Subject: Remove spurious comment --- synapse/metrics/__init__.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'synapse') diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 98333b2048..59900aa5d1 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -107,8 +107,6 @@ class InFlightGauge(object): will update. 
""" - # TODO: Expand this to - def __init__(self, name, desc, labels, sub_metrics): self.name = name self.desc = desc -- cgit 1.5.1 From bc9af88a2d971cd75aad419c60e13b0778640b28 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Sat, 15 Sep 2018 00:26:00 +1000 Subject: fix --- synapse/http/client.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/http/client.py b/synapse/http/client.py index d60f87b04e..ec339a92ad 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -93,7 +93,7 @@ class SimpleHttpClient(object): outgoing_requests_counter.labels(method).inc() # log request but strip `access_token` (AS requests for example include this) - logger.info("Sending request %s %s", method, redact_uri(uri.encode('ascii'))) + logger.info("Sending request %s %s", method, redact_uri(uri)) try: request_deferred = treq.request( @@ -108,14 +108,14 @@ class SimpleHttpClient(object): incoming_responses_counter.labels(method, response.code).inc() logger.info( "Received response to %s %s: %s", - method, redact_uri(uri.encode('ascii')), response.code + method, redact_uri(uri), response.code ) defer.returnValue(response) except Exception as e: incoming_responses_counter.labels(method, "ERR").inc() logger.info( "Error sending request to %s %s: %s %s", - method, redact_uri(uri.encode('ascii')), type(e).__name__, e.args[0] + method, redact_uri(uri), type(e).__name__, e.args[0] ) raise -- cgit 1.5.1 From 024be6cf1891fc20baac2356246595dc7f8ba5c5 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Fri, 14 Sep 2018 18:12:52 +0100 Subject: don't filter membership events based on history visibility (#3874) don't filter membership events based on history visibility as we will already have filtered the messages in the timeline, and state events are always visible. and because @erikjohnston said so. 
--- changelog.d/3874.bugfix | 0 synapse/handlers/pagination.py | 9 +-------- 2 files changed, 1 insertion(+), 8 deletions(-) create mode 100644 changelog.d/3874.bugfix (limited to 'synapse') diff --git a/changelog.d/3874.bugfix b/changelog.d/3874.bugfix new file mode 100644 index 0000000000..e69de29bb2 diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 5170d093e3..a155b6e938 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -269,14 +269,7 @@ class PaginationHandler(object): if state_ids: state = yield self.store.get_events(list(state_ids.values())) - - if state: - state = yield filter_events_for_client( - self.store, - user_id, - state.values(), - is_peeking=(member_event_id is None), - ) + state = state.values() time_now = self.clock.time_msec() -- cgit 1.5.1 From fcfe7a850dada2e65982f2bc805d8bc409f07512 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Sep 2018 19:23:07 +0100 Subject: Add an awful secondary timeout to fix wedged requests This is an attempt to mitigate #3842 by adding yet-another-timeout --- synapse/http/matrixfederationclient.py | 11 ++++++++ synapse/util/async_helpers.py | 51 ++++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+) (limited to 'synapse') diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index da16b5dd8c..13b19f7626 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -42,6 +42,7 @@ from synapse.api.errors import ( ) from synapse.http.endpoint import matrix_federation_endpoint from synapse.util import logcontext +from synapse.util.async_helpers import timeout_no_seriously from synapse.util.logcontext import make_deferred_yieldable from synapse.util.metrics import Measure @@ -228,6 +229,16 @@ class MatrixFederationHttpClient(object): ) request_deferred.addTimeout(_sec_timeout, self.hs.get_reactor()) + # Sometimes the timeout above doesn't work, so lets hack yet + # another layer of timeouts in in the vain hope that at some + # point the world made sense and this really really really + # should work. + request_deferred = timeout_no_seriously( + request_deferred, + timeout=_sec_timeout * 2, + reactor=self.hs.get_reactor(), + ) + with Measure(self.clock, "outbound_request"): response = yield make_deferred_yieldable( request_deferred, diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 9b3f2f4b96..083e4f4128 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -438,3 +438,54 @@ def _cancelled_to_timed_out_error(value, timeout): value.trap(CancelledError) raise DeferredTimeoutError(timeout, "Deferred") return value + + +def timeout_no_seriously(deferred, timeout, reactor): + """The in build twisted deferred addTimeout (and the method above) + completely fail to time things out under some unknown circumstances. + + Lets try a different way of timing things out and maybe that will make + things work?! + + TODO: Kill this with fire. 
+ """ + + new_d = defer.Deferred() + + timed_out = [False] + + def time_it_out(): + timed_out[0] = True + deferred.cancel() + + if not new_d.called: + new_d.errback(DeferredTimeoutError(timeout, "Deferred")) + + delayed_call = reactor.callLater(timeout, time_it_out) + + def convert_cancelled(value): + if timed_out[0]: + return _cancelled_to_timed_out_error(value, timeout) + return value + + deferred.addBoth(convert_cancelled) + + def cancel_timeout(result): + # stop the pending call to cancel the deferred if it's been fired + if delayed_call.active(): + delayed_call.cancel() + return result + + deferred.addBoth(cancel_timeout) + + def success_cb(val): + if not new_d.called: + new_d.callback(val) + + def failure_cb(val): + if not new_d.called: + new_d.errback(val) + + deferred.addCallbacks(success_cb, failure_cb) + + return new_d -- cgit 1.5.1 From 24efb2a70dfd8aa100507612836601991e11affc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Sat, 15 Sep 2018 11:38:39 +0100 Subject: Fix timeout function Turns out deferred.cancel sometimes throws, so we do that last to ensure that we always do resolve the new deferred. --- synapse/util/async_helpers.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 083e4f4128..40c2946129 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -456,11 +456,12 @@ def timeout_no_seriously(deferred, timeout, reactor): def time_it_out(): timed_out[0] = True - deferred.cancel() if not new_d.called: new_d.errback(DeferredTimeoutError(timeout, "Deferred")) + deferred.cancel() + delayed_call = reactor.callLater(timeout, time_it_out) def convert_cancelled(value): -- cgit 1.5.1 From d42d79e3c3cc229fa6bb268a97edde4ac81e2df5 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 15 Sep 2018 22:27:41 +0100 Subject: don't ratelimit autojoins --- synapse/handlers/register.py | 1 + 1 file changed, 1 insertion(+) (limited to 'synapse') diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 1e53f2c635..da914c46ff 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -534,4 +534,5 @@ class RegistrationHandler(BaseHandler): room_id=room_id, remote_room_hosts=remote_room_hosts, action="join", + ratelimit=False, ) -- cgit 1.5.1 From 85a43f4167b5e775f7c3afe96d71137818561272 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 17 Sep 2018 13:19:00 +0100 Subject: Return a 404 when deleting unknown room alias As per https://github.com/matrix-org/matrix-doc/issues/1675 Fixes https://github.com/matrix-org/synapse/issues/2782 --- synapse/handlers/directory.py | 19 ++++++++++++++++--- synapse/storage/directory.py | 1 - 2 files changed, 16 insertions(+), 4 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index ef866da1b6..c745e6740b 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -20,7 +20,14 @@ import string from twisted.internet import defer from synapse.api.constants import EventTypes -from synapse.api.errors import AuthError, CodeMessageException, Codes, SynapseError +from synapse.api.errors import ( + AuthError, + CodeMessageException, + Codes, + NotFoundError, + StoreError, + SynapseError, +) from synapse.types import RoomAlias, UserID, get_domain_from_id from ._base import BaseHandler @@ -109,7 +116,13 @@ class DirectoryHandler(BaseHandler): def delete_association(self, requester, user_id, 
room_alias): # association deletion for human users - can_delete = yield self._user_can_delete_alias(room_alias, user_id) + try: + can_delete = yield self._user_can_delete_alias(room_alias, user_id) + except StoreError as e: + if e.code == 404: + raise NotFoundError("Unknown room alias") + raise + if not can_delete: raise AuthError( 403, "You don't have permission to delete the alias.", @@ -320,7 +333,7 @@ class DirectoryHandler(BaseHandler): def _user_can_delete_alias(self, alias, user_id): creator = yield self.store.get_room_alias_creator(alias.to_string()) - if creator and creator == user_id: + if creator == user_id: defer.returnValue(True) is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id)) diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py index 808194236a..cfb687cb53 100644 --- a/synapse/storage/directory.py +++ b/synapse/storage/directory.py @@ -75,7 +75,6 @@ class DirectoryWorkerStore(SQLBaseStore): }, retcol="creator", desc="get_room_alias_creator", - allow_none=True ) @cached(max_entries=5000) -- cgit 1.5.1 From fe88907d044c79844612b3950421b48c20fd22ad Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 17 Sep 2018 22:33:22 +1000 Subject: version --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/__init__.py b/synapse/__init__.py index 9dbe0b9f10..f6a194dfc2 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.33.4" +__version__ = "0.33.5rc1" -- cgit 1.5.1 From f00a9d2636211e6762d8c8c627ebee3f6cea05c9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 17 Sep 2018 16:15:42 +0100 Subject: Fix some b'abcd' noise in logs and metrics Python 3 compatibility: make sure that we decode some byte sequences before we use them to create log lines and metrics labels. 
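For context, a minimal illustration (not part of the patch) of the b'abcd' noise: under Python 3, values such as Twisted's `Request.method` are bytes, and interpolating bytes into a str format string renders their repr rather than their text:

    # Python 3 sketch; `method` stands in for a bytes value like Request.method
    method = b"GET"
    print("handling %s request" % (method,))                  # handling b'GET' request
    print("handling %s request" % (method.decode('ascii'),))  # handling GET request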
--- synapse/http/matrixfederationclient.py | 5 ++++- synapse/http/site.py | 8 ++++---- 2 files changed, 8 insertions(+), 5 deletions(-) (limited to 'synapse') diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 13b19f7626..083484a687 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -244,7 +244,10 @@ class MatrixFederationHttpClient(object): request_deferred, ) - log_result = "%d %s" % (response.code, response.phrase,) + log_result = "%d %s" % ( + response.code, + response.phrase.decode('ascii', errors='replace'), + ) break except Exception as e: if not retry_on_dns_fail and isinstance(e, DNSLookupError): diff --git a/synapse/http/site.py b/synapse/http/site.py index e1e53e8ae5..2191de9836 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -82,7 +82,7 @@ class SynapseRequest(Request): ) def get_request_id(self): - return "%s-%i" % (self.method, self.request_seq) + return "%s-%i" % (self.method.decode('ascii'), self.request_seq) def get_redacted_uri(self): uri = self.uri @@ -119,7 +119,7 @@ class SynapseRequest(Request): # dispatching to the handler, so that the handler # can update the servlet name in the request # metrics - requests_counter.labels(self.method, + requests_counter.labels(self.method.decode('ascii'), self.request_metrics.name).inc() @contextlib.contextmanager @@ -280,9 +280,9 @@ class SynapseRequest(Request): int(usage.db_txn_count), self.sentLength, code, - self.method, + self.method.decode('ascii'), self.get_redacted_uri(), - self.clientproto, + self.clientproto.decode('ascii', errors='replace'), user_agent, usage.evt_db_fetch_count, ) -- cgit 1.5.1 From 2b39494cd5a0a93811024c31d8c41e7b5e9e21a3 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Mon, 17 Sep 2018 16:35:18 +0100 Subject: Add python_version phone home stat --- synapse/app/homeserver.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'synapse') diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index ac97e19649..3d9d303839 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -457,6 +457,8 @@ def run(hs): stats["homeserver"] = hs.config.server_name stats["timestamp"] = now stats["uptime_seconds"] = uptime + version = sys.version_info + stats["python_version"] = (version.major) + (version.minor / 10) + (version.micro / 100) stats["total_users"] = yield hs.get_datastore().count_all_users() total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users() -- cgit 1.5.1 From f75b9961c6c782d2ca4586782aa5cd2c0ae9a5b2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 17 Sep 2018 16:52:02 +0100 Subject: Reinstate missing null check --- synapse/handlers/directory.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index c745e6740b..18741c5fac 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -333,7 +333,7 @@ class DirectoryHandler(BaseHandler): def _user_can_delete_alias(self, alias, user_id): creator = yield self.store.get_room_alias_creator(alias.to_string()) - if creator == user_id: + if creator is not None and creator == user_id: defer.returnValue(True) is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id)) -- cgit 1.5.1 From 9a1cceeca90e6ae52433d50dfd866d68836f686e Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Mon, 17 Sep 2018 17:09:06 +0100 Subject: Use a string for versions --- 
synapse/app/homeserver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 3d9d303839..be9be423f0 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -458,7 +458,7 @@ def run(hs): stats["timestamp"] = now stats["uptime_seconds"] = uptime version = sys.version_info - stats["python_version"] = (version.major) + (version.minor / 10) + (version.micro / 100) + stats["python_version"] = "{}.{}.{}".format(version.major, version.minor, version.micro) stats["total_users"] = yield hs.get_datastore().count_all_users() total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users() -- cgit 1.5.1 From ac80cb08fe247812b46c70843b2d5c9764cca619 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 17 Sep 2018 17:16:50 +0100 Subject: Fix more b'abcd' noise in metrics --- synapse/http/request_metrics.py | 22 +++++++++++----------- synapse/http/site.py | 2 +- 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'synapse') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 72c2654678..fedb4e6b18 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -162,7 +162,7 @@ class RequestMetrics(object): with _in_flight_requests_lock: _in_flight_requests.add(self) - def stop(self, time_sec, request): + def stop(self, time_sec, response_code, sent_bytes): with _in_flight_requests_lock: _in_flight_requests.discard(self) @@ -179,35 +179,35 @@ class RequestMetrics(object): ) return - response_code = str(request.code) + response_code = str(response_code) - outgoing_responses_counter.labels(request.method, response_code).inc() + outgoing_responses_counter.labels(self.method, response_code).inc() - response_count.labels(request.method, self.name, tag).inc() + response_count.labels(self.method, self.name, tag).inc() - response_timer.labels(request.method, self.name, tag, response_code).observe( + response_timer.labels(self.method, self.name, tag, response_code).observe( time_sec - self.start ) resource_usage = context.get_resource_usage() - response_ru_utime.labels(request.method, self.name, tag).inc( + response_ru_utime.labels(self.method, self.name, tag).inc( resource_usage.ru_utime, ) - response_ru_stime.labels(request.method, self.name, tag).inc( + response_ru_stime.labels(self.method, self.name, tag).inc( resource_usage.ru_stime, ) - response_db_txn_count.labels(request.method, self.name, tag).inc( + response_db_txn_count.labels(self.method, self.name, tag).inc( resource_usage.db_txn_count ) - response_db_txn_duration.labels(request.method, self.name, tag).inc( + response_db_txn_duration.labels(self.method, self.name, tag).inc( resource_usage.db_txn_duration_sec ) - response_db_sched_duration.labels(request.method, self.name, tag).inc( + response_db_sched_duration.labels(self.method, self.name, tag).inc( resource_usage.db_sched_duration_sec ) - response_size.labels(request.method, self.name, tag).inc(request.sentLength) + response_size.labels(self.method, self.name, tag).inc(sent_bytes) # We always call this at the end to ensure that we update the metrics # regardless of whether a call to /metrics while the request was in diff --git a/synapse/http/site.py b/synapse/http/site.py index 2191de9836..9579e8cd0d 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -288,7 +288,7 @@ class SynapseRequest(Request): ) try: - self.request_metrics.stop(self.finish_time, self) + 
self.request_metrics.stop(self.finish_time, self.code, self.sentLength) except Exception as e: logger.warn("Failed to stop metrics: %r", e) -- cgit 1.5.1 From b58714789fa21fb6907d8c46d3155472d09bc8e6 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Mon, 17 Sep 2018 17:35:54 +0100 Subject: make pip happy? --- synapse/app/homeserver.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index be9be423f0..ea57350d46 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -71,7 +71,7 @@ from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.module_loader import load_module from synapse.util.rlimit import change_resource_limit -from synapse.util.versionstring import get_version_string +from synapse.util.\\\\string import get_version_string logger = logging.getLogger("synapse.app.homeserver") @@ -458,7 +458,9 @@ def run(hs): stats["timestamp"] = now stats["uptime_seconds"] = uptime version = sys.version_info - stats["python_version"] = "{}.{}.{}".format(version.major, version.minor, version.micro) + stats["python_version"] = "{}.{}.{}".format( + version.major, version.minor, version.micro + ) stats["total_users"] = yield hs.get_datastore().count_all_users() total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users() -- cgit 1.5.1 From 5baa087312f056fa3730581d66f220f49eed8fdd Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Mon, 17 Sep 2018 17:37:56 +0100 Subject: typo --- synapse/app/homeserver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index ea57350d46..3241ded188 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -71,7 +71,7 @@ from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.module_loader import load_module from synapse.util.rlimit import change_resource_limit -from synapse.util.\\\\string import get_version_string +from synapse.util.versionstring import get_version_string logger = logging.getLogger("synapse.app.homeserver") -- cgit 1.5.1 From 550007cb0eb12c7ddf0510afdb23195c295de396 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 18 Sep 2018 15:02:51 +0100 Subject: Bump timeout on get_missing_events request --- synapse/handlers/federation.py | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 0c68e8a472..f10b46414b 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -360,6 +360,35 @@ class FederationHandler(BaseHandler): # apparently. # # see https://github.com/matrix-org/synapse/pull/1744 + # + # ---- + # + # Update richvdh 2018/09/18: There are a number of problems with timing this + # request out aggressively on the client side: + # + # - it plays badly with the server-side rate-limiter, which starts tarpitting you + # if you send too many requests at once, so you end up with the server carefully + # working through the backlog of your requests, which you have already timed + # out. + # + # - for this request in particular, we now (as of + # https://github.com/matrix-org/synapse/pull/3456) reject any PDUs where the + # server can't produce a plausible-looking set of prev_events - so we become + # much more likely to reject the event.
+ # + # - contrary to what it says above, we do *not* fall back to fetching fresh state + # for the room if get_missing_events times out. Rather, we give up processing + # the PDU whose prevs we are missing, which then makes it much more likely that + # we'll end up back here for the *next* PDU in the list, which exacerbates the + # problem. + # + # - the aggressive 10s timeout was introduced to deal with incoming federation + # requests taking 8 hours to process. It's not entirely clear why that was going + # on; certainly there were other issues causing traffic storms which are now + # resolved, and I think in any case we may be more sensible about our locking + # now. We're *certainly* more sensible about our logging. + # + # All that said: Let's try increasing the timeout to 60s and see what happens. missing_events = yield self.federation_client.get_missing_events( origin, @@ -368,7 +397,7 @@ class FederationHandler(BaseHandler): latest_events=[pdu], limit=10, min_depth=min_depth, - timeout=10000, + timeout=60000, ) logger.info( -- cgit 1.5.1 From b3097396e7cf8d29255b4855e9bf06c6b98a549e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 18 Sep 2018 17:01:12 +0100 Subject: Set SNI to the server_name, not whatever was in the SRV record Fixes #3843 --- synapse/http/endpoint.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/http/endpoint.py b/synapse/http/endpoint.py index b0c9369519..91025037a3 100644 --- a/synapse/http/endpoint.py +++ b/synapse/http/endpoint.py @@ -108,7 +108,7 @@ def matrix_federation_endpoint(reactor, destination, tls_client_options_factory= Args: reactor: Twisted reactor. - destination (bytes): The name of the server to connect to. + destination (unicode): The name of the server to connect to. tls_client_options_factory (synapse.crypto.context_factory.ClientTLSOptionsFactory): Factory which generates TLS options for client connections. @@ -126,10 +126,17 @@ def matrix_federation_endpoint(reactor, destination, tls_client_options_factory= transport_endpoint = HostnameEndpoint default_port = 8008 else: + # the SNI string should be the same as the Host header, minus the port. + # as per https://github.com/matrix-org/synapse/issues/2525#issuecomment-336896777, + # the Host header and SNI should therefore be the server_name of the remote + # server. + tls_options = tls_client_options_factory.get_options(domain) + def transport_endpoint(reactor, host, port, timeout): return wrapClientTLS( - tls_client_options_factory.get_options(host), - HostnameEndpoint(reactor, host, port, timeout=timeout)) + tls_options, + HostnameEndpoint(reactor, host, port, timeout=timeout), + ) default_port = 8448 if port is None: -- cgit 1.5.1 From 31c15dcb80c8f11fd03dbb9b0ccff4777dc8e457 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 18 Sep 2018 18:17:15 +0100 Subject: Refactor matrixfederationclient to fix logging (#3906) We want to wait until we have read the response body before we log the request as complete, otherwise a confusing thing happens where the request appears to have completed, but we later fail it. To do this, we factor the salient details of a request out to a separate object, which can then keep track of the txn_id, so that it can be logged.
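In sketch form, the ordering this refactor is after (a simplification of the `_handle_json_response` helper in the diff below; error handling elided, names as in the patch):

    from twisted.internet import defer
    import logging
    import treq

    logger = logging.getLogger(__name__)

    @defer.inlineCallbacks
    def _read_and_log(request, response):
        # the headers alone don't mean success: reading the body can still fail,
        # so only log the request as completed once the body has been consumed
        body = yield treq.json_content(response)
        logger.info("{%s} [%s] Completed", request.txn_id, request.destination)
        defer.returnValue(body)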
--- changelog.d/3906.misc | 1 + synapse/http/matrixfederationclient.py | 385 +++++++++++++++++++------------ tests/http/test_fedclient.py | 43 +++- tests/replication/slave/storage/_base.py | 35 +-- tests/server.py | 81 +++++++ 5 files changed, 359 insertions(+), 186 deletions(-) create mode 100644 changelog.d/3906.misc (limited to 'synapse') diff --git a/changelog.d/3906.misc b/changelog.d/3906.misc new file mode 100644 index 0000000000..11709186d3 --- /dev/null +++ b/changelog.d/3906.misc @@ -0,0 +1 @@ +Improve logging of outbound federation requests \ No newline at end of file diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 083484a687..6a2d447289 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -17,10 +17,12 @@ import cgi import logging import random import sys +from io import BytesIO from six import PY3, string_types from six.moves import urllib +import attr import treq from canonicaljson import encode_canonical_json from prometheus_client import Counter @@ -28,8 +30,9 @@ from signedjson.sign import sign_json from twisted.internet import defer, protocol from twisted.internet.error import DNSLookupError +from twisted.internet.task import _EPSILON, Cooperator from twisted.web._newclient import ResponseDone -from twisted.web.client import Agent, HTTPConnectionPool +from twisted.web.client import Agent, FileBodyProducer, HTTPConnectionPool from twisted.web.http_headers import Headers import synapse.metrics @@ -41,13 +44,11 @@ from synapse.api.errors import ( SynapseError, ) from synapse.http.endpoint import matrix_federation_endpoint -from synapse.util import logcontext from synapse.util.async_helpers import timeout_no_seriously from synapse.util.logcontext import make_deferred_yieldable from synapse.util.metrics import Measure logger = logging.getLogger(__name__) -outbound_logger = logging.getLogger("synapse.http.outbound") outgoing_requests_counter = Counter("synapse_http_matrixfederationclient_requests", "", ["method"]) @@ -78,6 +79,93 @@ class MatrixFederationEndpointFactory(object): ) +_next_id = 1 + + +@attr.s +class MatrixFederationRequest(object): + method = attr.ib() + """HTTP method + :type: str + """ + + path = attr.ib() + """HTTP path + :type: str + """ + + destination = attr.ib() + """The remote server to send the HTTP request to. + :type: str""" + + json = attr.ib(default=None) + """JSON to send in the body. + :type: dict|None + """ + + json_callback = attr.ib(default=None) + """A callback to generate the JSON. + :type: func|None + """ + + query = attr.ib(default=None) + """Query arguments. 
+ :type: dict|None + """ + + txn_id = attr.ib(default=None) + """Unique ID for this request (for logging) + :type: str|None + """ + + def __attrs_post_init__(self): + global _next_id + self.txn_id = "%s-O-%s" % (self.method, _next_id) + _next_id = (_next_id + 1) % (MAXINT - 1) + + def get_json(self): + if self.json_callback: + return self.json_callback() + return self.json + + +@defer.inlineCallbacks +def _handle_json_response(reactor, timeout_sec, request, response): + """ + Reads the JSON body of a response, with a timeout + + Args: + reactor (IReactor): twisted reactor, for the timeout + timeout_sec (float): number of seconds to wait for response to complete + request (MatrixFederationRequest): the request that triggered the response + response (IResponse): response to the request + + Returns: + dict: parsed JSON response + """ + try: + check_content_type_is_json(response.headers) + d = treq.json_content(response) + d.addTimeout(timeout_sec, reactor) + body = yield make_deferred_yieldable(d) + except Exception as e: + logger.warn( + "{%s} [%d] Error reading response: %s", + request.txn_id, + request.destination, + e, + ) + raise + logger.info( + "{%s} [%d] Completed: %d %s", + request.txn_id, + request.destination, + response.code, + response.phrase.decode('ascii', errors='replace'), + ) + defer.returnValue(body) + + class MatrixFederationHttpClient(object): """HTTP client used to talk to other homeservers over the federation protocol. Send client certificates and signs requests. @@ -102,34 +190,35 @@ class MatrixFederationHttpClient(object): self.clock = hs.get_clock() self._store = hs.get_datastore() self.version_string = hs.version_string.encode('ascii') - self._next_id = 1 self.default_timeout = 60 - def _create_url(self, destination, path_bytes, param_bytes, query_bytes): - return urllib.parse.urlunparse( - (b"matrix", destination, path_bytes, param_bytes, query_bytes, b"") - ) + def schedule(x): + reactor.callLater(_EPSILON, x) + + self._cooperator = Cooperator(scheduler=schedule) @defer.inlineCallbacks - def _request(self, destination, method, path, - json=None, json_callback=None, - param_bytes=b"", - query=None, retry_on_dns_fail=True, - timeout=None, long_retries=False, - ignore_backoff=False, - backoff_on_404=False): + def _send_request( + self, + request, + retry_on_dns_fail=True, + timeout=None, + long_retries=False, + ignore_backoff=False, + backoff_on_404=False + ): """ - Creates and sends a request to the given server. + Sends a request to the given server. Args: - destination (str): The remote server to send the HTTP request to. - method (str): HTTP method - path (str): The HTTP path - json (dict or None): JSON to send in the body. - json_callback (func or None): A callback to generate the JSON. - query (dict or None): Query arguments. + request (MatrixFederationRequest): details of request to be sent + + timeout (int|None): number of milliseconds to wait for the response headers + (including connecting to the server). 60s by default. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. 
+ backoff_on_404 (bool): Back off if we get a 404 Returns: @@ -154,38 +243,32 @@ class MatrixFederationHttpClient(object): if ( self.hs.config.federation_domain_whitelist is not None and - destination not in self.hs.config.federation_domain_whitelist + request.destination not in self.hs.config.federation_domain_whitelist ): - raise FederationDeniedError(destination) + raise FederationDeniedError(request.destination) limiter = yield synapse.util.retryutils.get_retry_limiter( - destination, + request.destination, self.clock, self._store, backoff_on_404=backoff_on_404, ignore_backoff=ignore_backoff, ) - headers_dict = {} - path_bytes = path.encode("ascii") - if query: - query_bytes = encode_query_args(query) + method = request.method + destination = request.destination + path_bytes = request.path.encode("ascii") + if request.query: + query_bytes = encode_query_args(request.query) else: query_bytes = b"" headers_dict = { "User-Agent": [self.version_string], - "Host": [destination], + "Host": [request.destination], } with limiter: - url = self._create_url( - destination.encode("ascii"), path_bytes, param_bytes, query_bytes - ).decode('ascii') - - txn_id = "%s-O-%s" % (method, self._next_id) - self._next_id = (self._next_id + 1) % (MAXINT - 1) - # XXX: Would be much nicer to retry only at the transaction-layer # (once we have reliable transactions in place) if long_retries: @@ -193,16 +276,19 @@ class MatrixFederationHttpClient(object): else: retries_left = MAX_SHORT_RETRIES - http_url = urllib.parse.urlunparse( - (b"", b"", path_bytes, param_bytes, query_bytes, b"") - ).decode('ascii') + url = urllib.parse.urlunparse(( + b"matrix", destination.encode("ascii"), + path_bytes, None, query_bytes, b"", + )).decode('ascii') + + http_url = urllib.parse.urlunparse(( + b"", b"", + path_bytes, None, query_bytes, b"", + )).decode('ascii') - log_result = None while True: try: - if json_callback: - json = json_callback() - + json = request.get_json() if json: data = encode_canonical_json(json) headers_dict["Content-Type"] = ["application/json"] @@ -213,16 +299,24 @@ class MatrixFederationHttpClient(object): data = None self.sign_request(destination, method, http_url, headers_dict) - outbound_logger.info( + logger.info( "{%s} [%s] Sending request: %s %s", - txn_id, destination, method, url + request.txn_id, destination, method, url ) + if data: + producer = FileBodyProducer( + BytesIO(data), + cooperator=self._cooperator + ) + else: + producer = None + request_deferred = treq.request( method, url, headers=Headers(headers_dict), - data=data, + data=producer, agent=self.agent, reactor=self.hs.get_reactor(), unbuffered=True @@ -244,33 +338,19 @@ class MatrixFederationHttpClient(object): request_deferred, ) - log_result = "%d %s" % ( - response.code, - response.phrase.decode('ascii', errors='replace'), - ) break except Exception as e: - if not retry_on_dns_fail and isinstance(e, DNSLookupError): - logger.warn( - "DNS Lookup failed to %s with %s", - destination, - e - ) - log_result = "DNS Lookup failed to %s with %s" % ( - destination, e - ) - raise - logger.warn( - "{%s} Sending request failed to %s: %s %s: %s", - txn_id, + "{%s} [%s] Request failed: %s %s: %s", + request.txn_id, destination, method, url, _flatten_response_never_received(e), ) - log_result = _flatten_response_never_received(e) + if not retry_on_dns_fail and isinstance(e, DNSLookupError): + raise if retries_left and not timeout: if long_retries: @@ -283,33 +363,33 @@ class MatrixFederationHttpClient(object): delay *= random.uniform(0.8, 1.4) 
logger.debug( - "{%s} Waiting %s before sending to %s...", - txn_id, + "{%s} [%s] Waiting %ss before re-sending...", + request.txn_id, + destination, delay, - destination ) yield self.clock.sleep(delay) retries_left -= 1 else: raise - finally: - outbound_logger.info( - "{%s} [%s] Result: %s", - txn_id, - destination, - log_result, - ) + + logger.info( + "{%s} [%s] Got response headers: %d %s", + request.txn_id, + destination, + response.code, + response.phrase.decode('ascii', errors='replace'), + ) if 200 <= response.code < 300: pass else: # :'( # Update transactions table? - with logcontext.PreserveLoggingContext(): - d = treq.content(response) - d.addTimeout(_sec_timeout, self.hs.get_reactor()) - body = yield make_deferred_yieldable(d) + d = treq.content(response) + d.addTimeout(_sec_timeout, self.hs.get_reactor()) + body = yield make_deferred_yieldable(d) raise HttpResponseException( response.code, response.phrase, body ) @@ -403,29 +483,26 @@ class MatrixFederationHttpClient(object): is not on our federation whitelist """ - if not json_data_callback: - json_data_callback = lambda: data - - response = yield self._request( - destination, - "PUT", - path, - json_callback=json_data_callback, + request = MatrixFederationRequest( + method="PUT", + destination=destination, + path=path, query=args, + json_callback=json_data_callback, + json=data, + ) + + response = yield self._send_request( + request, long_retries=long_retries, timeout=timeout, ignore_backoff=ignore_backoff, backoff_on_404=backoff_on_404, ) - if 200 <= response.code < 300: - # We need to update the transactions table to say it was sent? - check_content_type_is_json(response.headers) - - with logcontext.PreserveLoggingContext(): - d = treq.json_content(response) - d.addTimeout(self.default_timeout, self.hs.get_reactor()) - body = yield make_deferred_yieldable(d) + body = yield _handle_json_response( + self.hs.get_reactor(), self.default_timeout, request, response, + ) defer.returnValue(body) @defer.inlineCallbacks @@ -459,31 +536,30 @@ class MatrixFederationHttpClient(object): Fails with ``FederationDeniedError`` if this destination is not on our federation whitelist """ - response = yield self._request( - destination, - "POST", - path, + + request = MatrixFederationRequest( + method="POST", + destination=destination, + path=path, query=args, json=data, + ) + + response = yield self._send_request( + request, long_retries=long_retries, timeout=timeout, ignore_backoff=ignore_backoff, ) - if 200 <= response.code < 300: - # We need to update the transactions table to say it was sent? 
- check_content_type_is_json(response.headers) - - with logcontext.PreserveLoggingContext(): - d = treq.json_content(response) - if timeout: - _sec_timeout = timeout / 1000 - else: - _sec_timeout = self.default_timeout - - d.addTimeout(_sec_timeout, self.hs.get_reactor()) - body = yield make_deferred_yieldable(d) + if timeout: + _sec_timeout = timeout / 1000 + else: + _sec_timeout = self.default_timeout + body = yield _handle_json_response( + self.hs.get_reactor(), _sec_timeout, request, response, + ) defer.returnValue(body) @defer.inlineCallbacks @@ -519,25 +595,23 @@ class MatrixFederationHttpClient(object): logger.debug("Query bytes: %s Retry DNS: %s", args, retry_on_dns_fail) - response = yield self._request( - destination, - "GET", - path, + request = MatrixFederationRequest( + method="GET", + destination=destination, + path=path, query=args, + ) + + response = yield self._send_request( + request, retry_on_dns_fail=retry_on_dns_fail, timeout=timeout, ignore_backoff=ignore_backoff, ) - if 200 <= response.code < 300: - # We need to update the transactions table to say it was sent? - check_content_type_is_json(response.headers) - - with logcontext.PreserveLoggingContext(): - d = treq.json_content(response) - d.addTimeout(self.default_timeout, self.hs.get_reactor()) - body = yield make_deferred_yieldable(d) - + body = yield _handle_json_response( + self.hs.get_reactor(), self.default_timeout, request, response, + ) defer.returnValue(body) @defer.inlineCallbacks @@ -568,25 +642,23 @@ class MatrixFederationHttpClient(object): Fails with ``FederationDeniedError`` if this destination is not on our federation whitelist """ - response = yield self._request( - destination, - "DELETE", - path, + request = MatrixFederationRequest( + method="DELETE", + destination=destination, + path=path, query=args, + ) + + response = yield self._send_request( + request, long_retries=long_retries, timeout=timeout, ignore_backoff=ignore_backoff, ) - if 200 <= response.code < 300: - # We need to update the transactions table to say it was sent? 
- check_content_type_is_json(response.headers) - - with logcontext.PreserveLoggingContext(): - d = treq.json_content(response) - d.addTimeout(self.default_timeout, self.hs.get_reactor()) - body = yield make_deferred_yieldable(d) - + body = yield _handle_json_response( + self.hs.get_reactor(), self.default_timeout, request, response, + ) defer.returnValue(body) @defer.inlineCallbacks @@ -614,11 +686,15 @@ class MatrixFederationHttpClient(object): Fails with ``FederationDeniedError`` if this destination is not on our federation whitelist """ - response = yield self._request( - destination, - "GET", - path, + request = MatrixFederationRequest( + method="GET", + destination=destination, + path=path, query=args, + ) + + response = yield self._send_request( + request, retry_on_dns_fail=retry_on_dns_fail, ignore_backoff=ignore_backoff, ) @@ -626,14 +702,25 @@ class MatrixFederationHttpClient(object): headers = dict(response.headers.getAllRawHeaders()) try: - with logcontext.PreserveLoggingContext(): - d = _readBodyToFile(response, output_stream, max_size) - d.addTimeout(self.default_timeout, self.hs.get_reactor()) - length = yield make_deferred_yieldable(d) - except Exception: - logger.exception("Failed to download body") + d = _readBodyToFile(response, output_stream, max_size) + d.addTimeout(self.default_timeout, self.hs.get_reactor()) + length = yield make_deferred_yieldable(d) + except Exception as e: + logger.warn( + "{%s} [%d] Error reading response: %s", + request.txn_id, + request.destination, + e, + ) raise - + logger.info( + "{%s} [%d] Completed: %d %s [%d bytes]", + request.txn_id, + request.destination, + response.code, + response.phrase.decode('ascii', errors='replace'), + length, + ) defer.returnValue((length, headers)) diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py index 1c46c9cfeb..66c09f63b6 100644 --- a/tests/http/test_fedclient.py +++ b/tests/http/test_fedclient.py @@ -18,9 +18,14 @@ from mock import Mock from twisted.internet.defer import TimeoutError from twisted.internet.error import ConnectingCancelledError, DNSLookupError from twisted.web.client import ResponseNeverReceived +from twisted.web.http import HTTPChannel -from synapse.http.matrixfederationclient import MatrixFederationHttpClient +from synapse.http.matrixfederationclient import ( + MatrixFederationHttpClient, + MatrixFederationRequest, +) +from tests.server import FakeTransport from tests.unittest import HomeserverTestCase @@ -40,7 +45,7 @@ class FederationClientTests(HomeserverTestCase): """ If the DNS raising returns an error, it will bubble up. """ - d = self.cl._request("testserv2:8008", "GET", "foo/bar", timeout=10000) + d = self.cl.get_json("testserv2:8008", "foo/bar", timeout=10000) self.pump() f = self.failureResultOf(d) @@ -51,7 +56,7 @@ class FederationClientTests(HomeserverTestCase): If the HTTP request is not connected and is timed out, it'll give a ConnectingCancelledError. """ - d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000) + d = self.cl.get_json("testserv:8008", "foo/bar", timeout=10000) self.pump() @@ -78,7 +83,7 @@ class FederationClientTests(HomeserverTestCase): If the HTTP request is connected, but gets no response before being timed out, it'll give a ResponseNeverReceived. 
""" - d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000) + d = self.cl.get_json("testserv:8008", "foo/bar", timeout=10000) self.pump() @@ -108,7 +113,12 @@ class FederationClientTests(HomeserverTestCase): """ Once the client gets the headers, _request returns successfully. """ - d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000) + request = MatrixFederationRequest( + method="GET", + destination="testserv:8008", + path="foo/bar", + ) + d = self.cl._send_request(request, timeout=10000) self.pump() @@ -155,3 +165,26 @@ class FederationClientTests(HomeserverTestCase): f = self.failureResultOf(d) self.assertIsInstance(f.value, TimeoutError) + + def test_client_sends_body(self): + self.cl.post_json( + "testserv:8008", "foo/bar", timeout=10000, + data={"a": "b"} + ) + + self.pump() + + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + client = clients[0][2].buildProtocol(None) + server = HTTPChannel() + + client.makeConnection(FakeTransport(server, self.reactor)) + server.makeConnection(FakeTransport(client, self.reactor)) + + self.pump(0.1) + + self.assertEqual(len(server.requests), 1) + request = server.requests[0] + content = request.content.read() + self.assertEqual(content, b'{"a":"b"}') diff --git a/tests/replication/slave/storage/_base.py b/tests/replication/slave/storage/_base.py index 089cecfbee..9e9fbbfe93 100644 --- a/tests/replication/slave/storage/_base.py +++ b/tests/replication/slave/storage/_base.py @@ -15,8 +15,6 @@ from mock import Mock, NonCallableMock -import attr - from synapse.replication.tcp.client import ( ReplicationClientFactory, ReplicationClientHandler, @@ -24,6 +22,7 @@ from synapse.replication.tcp.client import ( from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory from tests import unittest +from tests.server import FakeTransport class BaseSlavedStoreTestCase(unittest.HomeserverTestCase): @@ -56,36 +55,8 @@ class BaseSlavedStoreTestCase(unittest.HomeserverTestCase): server = server_factory.buildProtocol(None) client = client_factory.buildProtocol(None) - @attr.s - class FakeTransport(object): - - other = attr.ib() - disconnecting = False - buffer = attr.ib(default=b'') - - def registerProducer(self, producer, streaming): - - self.producer = producer - - def _produce(): - self.producer.resumeProducing() - reactor.callLater(0.1, _produce) - - reactor.callLater(0.0, _produce) - - def write(self, byt): - self.buffer = self.buffer + byt - - if getattr(self.other, "transport") is not None: - self.other.dataReceived(self.buffer) - self.buffer = b"" - - def writeSequence(self, seq): - for x in seq: - self.write(x) - - client.makeConnection(FakeTransport(server)) - server.makeConnection(FakeTransport(client)) + client.makeConnection(FakeTransport(server, reactor)) + server.makeConnection(FakeTransport(client, reactor)) def replicate(self): """Tell the master side of replication that something has happened, and then diff --git a/tests/server.py b/tests/server.py index 420ec4e088..ccea3baa55 100644 --- a/tests/server.py +++ b/tests/server.py @@ -280,3 +280,84 @@ def get_clock(): clock = ThreadedMemoryReactorClock() hs_clock = Clock(clock) return (clock, hs_clock) + + +@attr.s +class FakeTransport(object): + """ + A twisted.internet.interfaces.ITransport implementation which sends all its data + straight into an IProtocol object: it exists to connect two IProtocols together. 
+ + To use it, instantiate it with the receiving IProtocol, and then pass it to the + sending IProtocol's makeConnection method: + + server = HTTPChannel() + client.makeConnection(FakeTransport(server, self.reactor)) + + If you want bidirectional communication, you'll need two instances. + """ + + other = attr.ib() + """The Protocol object which will receive any data written to this transport. + + :type: twisted.internet.interfaces.IProtocol + """ + + _reactor = attr.ib() + """Test reactor + + :type: twisted.internet.interfaces.IReactorTime + """ + + disconnecting = False + buffer = attr.ib(default=b'') + producer = attr.ib(default=None) + + def getPeer(self): + return None + + def getHost(self): + return None + + def loseConnection(self): + self.disconnecting = True + + def abortConnection(self): + self.disconnecting = True + + def pauseProducing(self): + self.producer.pauseProducing() + + def unregisterProducer(self): + if not self.producer: + return + + self.producer = None + + def registerProducer(self, producer, streaming): + self.producer = producer + self.producerStreaming = streaming + + def _produce(): + d = self.producer.resumeProducing() + d.addCallback(lambda x: self._reactor.callLater(0.1, _produce)) + + if not streaming: + self._reactor.callLater(0.0, _produce) + + def write(self, byt): + self.buffer = self.buffer + byt + + def _write(): + if getattr(self.other, "transport") is not None: + self.other.dataReceived(self.buffer) + self.buffer = b"" + return + + self._reactor.callLater(0.0, _write) + + _write() + + def writeSequence(self, seq): + for x in seq: + self.write(x) -- cgit 1.5.1 From a219ce87263ad9be887cf039a04b4a1f06b7b0b8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 18 Sep 2018 18:27:37 +0100 Subject: Use directory server for room joins (#3899) When we do a join, always try the server we used for the alias lookup first. Fixes #2418 --- changelog.d/3899.bugfix | 1 + synapse/handlers/room_member.py | 5 +++++ 2 files changed, 6 insertions(+) create mode 100644 changelog.d/3899.bugfix (limited to 'synapse') diff --git a/changelog.d/3899.bugfix b/changelog.d/3899.bugfix new file mode 100644 index 0000000000..5120e3a823 --- /dev/null +++ b/changelog.d/3899.bugfix @@ -0,0 +1 @@ +When we join a room, always try the server we used for the alias lookup first, to avoid unresponsive and out-of-date servers. diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index f643619047..07fd3e82fc 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -583,6 +583,11 @@ class RoomMemberHandler(object): room_id = mapping["room_id"] servers = mapping["servers"] + # put the server which owns the alias at the front of the server list. 
+ if room_alias.domain in servers: + servers.remove(room_alias.domain) + servers.insert(0, room_alias.domain) + defer.returnValue((RoomID.from_string(room_id), servers)) @defer.inlineCallbacks -- cgit 1.5.1 From 35aec19f0ac88e7b16bc6bcc4f4e546b7fc89589 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Tue, 18 Sep 2018 15:26:08 -0600 Subject: Destination is a string --- synapse/http/matrixfederationclient.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'synapse') diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 6a2d447289..530c0245a9 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -150,14 +150,14 @@ def _handle_json_response(reactor, timeout_sec, request, response): body = yield make_deferred_yieldable(d) except Exception as e: logger.warn( - "{%s} [%d] Error reading response: %s", + "{%s} [%s] Error reading response: %s", request.txn_id, request.destination, e, ) raise logger.info( - "{%s} [%d] Completed: %d %s", + "{%s} [%s] Completed: %d %s", request.txn_id, request.destination, response.code, @@ -707,14 +707,14 @@ class MatrixFederationHttpClient(object): length = yield make_deferred_yieldable(d) except Exception as e: logger.warn( - "{%s} [%d] Error reading response: %s", + "{%s} [%s] Error reading response: %s", request.txn_id, request.destination, e, ) raise logger.info( - "{%s} [%d] Completed: %d %s [%d bytes]", + "{%s} [%s] Completed: %d %s [%d bytes]", request.txn_id, request.destination, response.code, -- cgit 1.5.1 From a334e1cacec5dd9c1a983415906d8ffea4a30134 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 19 Sep 2018 10:39:40 +0100 Subject: Update to use new timeout function everywhere. The existing deferred timeout helper function (and the one built into twisted) suffer from a bug when a deferred's canceller throws an exception, #3842. The new helper function doesn't suffer from this problem.
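A minimal reproduction of the bug being described (illustrative only, not from the patch): when a deferred's canceller raises, the cancellation that `Deferred.addTimeout` relies on blows up inside the reactor and the deferred is never errbacked, so the waiter hangs:

    from twisted.internet import defer, reactor

    def angry_canceller(d):
        # a canceller that throws, like the ones triggering #3842
        raise RuntimeError("cancel failed")

    d = defer.Deferred(angry_canceller)
    d.addTimeout(1, reactor)
    # when the timeout fires, d.cancel() raises inside the delayed call;
    # the TimeoutError errback never happens and d stays pending forever

The replacement below wraps the original deferred in a fresh one and errbacks that wrapper itself, so a throwing canceller can no longer prevent the timeout from firing.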
--- synapse/http/client.py | 4 +- synapse/http/matrixfederationclient.py | 25 +++++++----- synapse/notifier.py | 13 +++--- synapse/util/async_helpers.py | 73 +++++++++------------------------- 4 files changed, 43 insertions(+), 72 deletions(-) (limited to 'synapse') diff --git a/synapse/http/client.py b/synapse/http/client.py index ec339a92ad..3d05f83b8c 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -43,7 +43,7 @@ from twisted.web.http_headers import Headers from synapse.api.errors import Codes, HttpResponseException, SynapseError from synapse.http import cancelled_to_request_timed_out_error, redact_uri from synapse.http.endpoint import SpiderEndpoint -from synapse.util.async_helpers import add_timeout_to_deferred +from synapse.util.async_helpers import timeout_deferred from synapse.util.caches import CACHE_SIZE_FACTOR from synapse.util.logcontext import make_deferred_yieldable @@ -99,7 +99,7 @@ class SimpleHttpClient(object): request_deferred = treq.request( method, uri, agent=self.agent, data=data, headers=headers ) - add_timeout_to_deferred( + request_deferred = timeout_deferred( request_deferred, 60, self.hs.get_reactor(), cancelled_to_request_timed_out_error, ) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 530c0245a9..14b12cd1c4 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -44,7 +44,7 @@ from synapse.api.errors import ( SynapseError, ) from synapse.http.endpoint import matrix_federation_endpoint -from synapse.util.async_helpers import timeout_no_seriously +from synapse.util.async_helpers import timeout_deferred from synapse.util.logcontext import make_deferred_yieldable from synapse.util.metrics import Measure @@ -145,8 +145,14 @@ def _handle_json_response(reactor, timeout_sec, request, response): """ try: check_content_type_is_json(response.headers) + d = treq.json_content(response) - d.addTimeout(timeout_sec, reactor) + d = timeout_deferred( + d, + timeout=timeout_sec, + reactor=reactor, + ) + body = yield make_deferred_yieldable(d) except Exception as e: logger.warn( @@ -321,15 +327,10 @@ class MatrixFederationHttpClient(object): reactor=self.hs.get_reactor(), unbuffered=True ) - request_deferred.addTimeout(_sec_timeout, self.hs.get_reactor()) - # Sometimes the timeout above doesn't work, so lets hack yet - # another layer of timeouts in in the vain hope that at some - # point the world made sense and this really really really - # should work. - request_deferred = timeout_no_seriously( + request_deferred = timeout_deferred( request_deferred, - timeout=_sec_timeout * 2, + timeout=_sec_timeout, reactor=self.hs.get_reactor(), ) @@ -388,7 +389,11 @@ class MatrixFederationHttpClient(object): # :'( # Update transactions table? 
d = treq.content(response) - d.addTimeout(_sec_timeout, self.hs.get_reactor()) + d = timeout_deferred( + d, + timeout=_sec_timeout, + reactor=self.hs.get_reactor(), + ) body = yield make_deferred_yieldable(d) raise HttpResponseException( response.code, response.phrase, body ) diff --git a/synapse/notifier.py b/synapse/notifier.py index 82f391481c..2d683718fb 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -28,7 +28,7 @@ from synapse.types import StreamToken from synapse.util.async_helpers import ( DeferredTimeoutError, ObservableDeferred, - add_timeout_to_deferred, + timeout_deferred, ) from synapse.util.logcontext import PreserveLoggingContext, run_in_background from synapse.util.logutils import log_function @@ -337,7 +337,7 @@ class Notifier(object): # Now we wait for the _NotifierUserStream to be told there # is a new token. listener = user_stream.new_listener(prev_token) - add_timeout_to_deferred( + listener.deferred = timeout_deferred( listener.deferred, (end_time - now) / 1000., self.hs.get_reactor(), @@ -559,11 +559,12 @@ class Notifier(object): if end_time <= now: break - add_timeout_to_deferred( - listener.deferred.addTimeout, - (end_time - now) / 1000., - self.hs.get_reactor(), + listener.deferred = timeout_deferred( + listener.deferred, + timeout=(end_time - now) / 1000., + reactor=self.hs.get_reactor(), ) + try: with PreserveLoggingContext(): yield listener.deferred diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 40c2946129..2e12bcda98 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -380,23 +380,25 @@ class DeferredTimeoutError(Exception): """ -def add_timeout_to_deferred(deferred, timeout, reactor, on_timeout_cancel=None): - """ - Add a timeout to a deferred by scheduling it to be cancelled after - timeout seconds. +def _cancelled_to_timed_out_error(value, timeout): + if isinstance(value, failure.Failure): + value.trap(CancelledError) + raise DeferredTimeoutError(timeout, "Deferred") + return value - This is essentially a backport of deferred.addTimeout, which was introduced - in twisted 16.5. - If the deferred gets timed out, it errbacks with a DeferredTimeoutError, - unless a cancelable function was passed to its initialization or unless - a different on_timeout_cancel callable is provided. +def timeout_deferred(deferred, timeout, reactor, on_timeout_cancel=None): + """The in-built twisted `Deferred.addTimeout` fails to time out deferreds + that have a canceller that throws exceptions. This method creates a new + deferred that wraps and times out the given deferred, correctly handling + the case where the given deferred's canceller throws. - Args: - deferred (defer.Deferred): deferred to be timed out - timeout (Number): seconds to time out after - reactor (twisted.internet.reactor): the Twisted reactor to use + NOTE: Unlike `Deferred.addTimeout`, this function returns a new deferred + Args: + deferred (Deferred) + timeout (float): Timeout in seconds + reactor (twisted.internet.reactor): The twisted reactor to use on_timeout_cancel (callable): A callable which is called immediately after the deferred times out, and not if this deferred is otherwise cancelled before the timeout. @@ -407,47 +409,9 @@ def add_timeout_to_deferred(deferred, timeout, reactor, on_timeout_cancel=None): The default callable (if none is provided) will translate a CancelledError Failure into a DeferredTimeoutError.
- """ - timed_out = [False] - def time_it_out(): - timed_out[0] = True - deferred.cancel() - - delayed_call = reactor.callLater(timeout, time_it_out) - - def convert_cancelled(value): - if timed_out[0]: - to_call = on_timeout_cancel or _cancelled_to_timed_out_error - return to_call(value, timeout) - return value - - deferred.addBoth(convert_cancelled) - - def cancel_timeout(result): - # stop the pending call to cancel the deferred if it's been fired - if delayed_call.active(): - delayed_call.cancel() - return result - - deferred.addBoth(cancel_timeout) - - -def _cancelled_to_timed_out_error(value, timeout): - if isinstance(value, failure.Failure): - value.trap(CancelledError) - raise DeferredTimeoutError(timeout, "Deferred") - return value - - -def timeout_no_seriously(deferred, timeout, reactor): - """The in build twisted deferred addTimeout (and the method above) - completely fail to time things out under some unknown circumstances. - - Lets try a different way of timing things out and maybe that will make - things work?! - - TODO: Kill this with fire. + Returns: + Deferred """ new_d = defer.Deferred() @@ -466,7 +430,8 @@ def timeout_no_seriously(deferred, timeout, reactor): def convert_cancelled(value): if timed_out[0]: - return _cancelled_to_timed_out_error(value, timeout) + to_call = on_timeout_cancel or _cancelled_to_timed_out_error + return to_call(value, timeout) return value deferred.addBoth(convert_cancelled) -- cgit 1.5.1 From 6c48aa0256ce7dfd62e72a77b03f5bb2222473ba Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 19 Sep 2018 11:04:42 +0100 Subject: Run canceller first to allow it to generate correct error --- synapse/util/async_helpers.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 2e12bcda98..2b2e85ecb7 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -421,11 +421,14 @@ def timeout_deferred(deferred, timeout, reactor, on_timeout_cancel=None): def time_it_out(): timed_out[0] = True + try: + deferred.cancel() + except: # noqa: E722, if we throw any exception it'll break time outs + logger.exception("Canceller failed during timeout") + if not new_d.called: new_d.errback(DeferredTimeoutError(timeout, "Deferred")) - deferred.cancel() - delayed_call = reactor.callLater(timeout, time_it_out) def convert_cancelled(value): -- cgit 1.5.1 From 9407bcf37a99ebb72eef73e19632cdcf5964c968 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 19 Sep 2018 11:05:21 +0100 Subject: Replace custom DeferredTimeoutError with defer.TimeoutError --- synapse/notifier.py | 5 ++--- synapse/util/async_helpers.py | 12 +++--------- 2 files changed, 5 insertions(+), 12 deletions(-) (limited to 'synapse') diff --git a/synapse/notifier.py b/synapse/notifier.py index 2d683718fb..2e7525b7d2 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -26,7 +26,6 @@ from synapse.handlers.presence import format_user_presence_state from synapse.metrics import LaterGauge from synapse.types import StreamToken from synapse.util.async_helpers import ( - DeferredTimeoutError, ObservableDeferred, timeout_deferred, ) @@ -354,7 +353,7 @@ class Notifier(object): # Update the prev_token to the current_token since nothing # has happened between the old prev_token and the current_token prev_token = current_token - except DeferredTimeoutError: + except defer.TimeoutError: break except defer.CancelledError: break @@ -568,7 +567,7 @@ class Notifier(object): try: with 
PreserveLoggingContext(): yield listener.deferred - except DeferredTimeoutError: + except defer.TimeoutError: break except defer.CancelledError: break diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 2b2e85ecb7..ec7b2c9672 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -374,16 +374,10 @@ class ReadWriteLock(object): defer.returnValue(_ctx_manager()) -class DeferredTimeoutError(Exception): - """ - This error is raised by default when a L{Deferred} times out. - """ - - def _cancelled_to_timed_out_error(value, timeout): if isinstance(value, failure.Failure): value.trap(CancelledError) - raise DeferredTimeoutError(timeout, "Deferred") + raise defer.TimeoutError(timeout, "Deferred") return value @@ -408,7 +402,7 @@ def timeout_deferred(deferred, timeout, reactor, on_timeout_cancel=None): the timeout. The default callable (if none is provided) will translate a - CancelledError Failure into a DeferredTimeoutError. + CancelledError Failure into a defer.TimeoutError. Returns: Deferred @@ -427,7 +421,7 @@ def timeout_deferred(deferred, timeout, reactor, on_timeout_cancel=None): logger.exception("Canceller failed during timeout") if not new_d.called: - new_d.errback(DeferredTimeoutError(timeout, "Deferred")) + new_d.errback(defer.TimeoutError(timeout, "Deferred")) delayed_call = reactor.callLater(timeout, time_it_out) -- cgit 1.5.1 From 80d2d50f4764d6662ceef86f834f845b539263e8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 19 Sep 2018 11:19:47 +0100 Subject: Fixup --- changelog.d/3910 | 1 - changelog.d/3910.bugfix | 1 + synapse/notifier.py | 5 +---- 3 files changed, 2 insertions(+), 5 deletions(-) delete mode 100644 changelog.d/3910 create mode 100644 changelog.d/3910.bugfix (limited to 'synapse') diff --git a/changelog.d/3910 b/changelog.d/3910 deleted file mode 100644 index 22ec2adc33..0000000000 --- a/changelog.d/3910 +++ /dev/null @@ -1 +0,0 @@ -Fix bug where things occasionally were not being timed out correctly. diff --git a/changelog.d/3910.bugfix b/changelog.d/3910.bugfix new file mode 100644 index 0000000000..22ec2adc33 --- /dev/null +++ b/changelog.d/3910.bugfix @@ -0,0 +1 @@ +Fix bug where things occasionally were not being timed out correctly. diff --git a/synapse/notifier.py b/synapse/notifier.py index 2e7525b7d2..f1d92c1395 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -25,10 +25,7 @@ from synapse.api.errors import AuthError from synapse.handlers.presence import format_user_presence_state from synapse.metrics import LaterGauge from synapse.types import StreamToken -from synapse.util.async_helpers import ( - ObservableDeferred, - timeout_deferred, -) +from synapse.util.async_helpers import ObservableDeferred, timeout_deferred from synapse.util.logcontext import PreserveLoggingContext, run_in_background from synapse.util.logutils import log_function from synapse.util.metrics import Measure -- cgit 1.5.1 From b9158ac2bf46e15107162be136964c17bd8a948c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 19 Sep 2018 14:16:34 +0100 Subject: Remove get_destination_retry_timings cache Currently we rely on the master to invalidate this cache promptly. However, after having moved most federation endpoints off of master this no longer happens, causing outbound federation to get blackholed.
Fixes #3798 --- synapse/storage/transactions.py | 5 ----- 1 file changed, 5 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index 0c42bd3322..7d51f9612a 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -156,7 +156,6 @@ class TransactionStore(SQLBaseStore): """ pass - @cached(max_entries=10000) def get_destination_retry_timings(self, destination): """Gets the current retry timings (if any) for a given destination. @@ -212,10 +211,6 @@ class TransactionStore(SQLBaseStore): retry_last_ts, retry_interval): self.database_engine.lock_table(txn, "destinations") - self._invalidate_cache_and_stream( - txn, self.get_destination_retry_timings, (destination,) - ) - # We need to be careful here as the data may have changed from under us # due to a worker setting the timings. -- cgit 1.5.1 From 392a54128c03128255ee538e17ed9bd87b95d4ca Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 19 Sep 2018 14:37:49 +0100 Subject: pep8 --- synapse/storage/transactions.py | 1 - 1 file changed, 1 deletion(-) (limited to 'synapse') diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index 7d51f9612a..449bdc1e49 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -23,7 +23,6 @@ from canonicaljson import encode_canonical_json from twisted.internet import defer from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.util.caches.descriptors import cached from ._base import SQLBaseStore, db_to_json -- cgit 1.5.1 From bbab6ebfd9257ba9ad5e576c4b9b8bdea1c166cf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 19 Sep 2018 14:45:14 +0100 Subject: Fix up changelog and remove spurious comment --- changelog.d/3914.bugfix | 2 +- synapse/storage/transactions.py | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'synapse') diff --git a/changelog.d/3914.bugfix b/changelog.d/3914.bugfix index 60fb8ec1ae..27e6bad590 100644 --- a/changelog.d/3914.bugfix +++ b/changelog.d/3914.bugfix @@ -1 +1 @@ -Fix bug where outboud federation would stop talking to some servers when using workers +Fix bug where outbound federation would stop talking to some servers when using workers diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index 449bdc1e49..baf0379a68 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -196,8 +196,6 @@ class TransactionStore(SQLBaseStore): retry_interval (int) - how long until next retry in ms """ - # XXX: we could chose to not bother persisting this if our cache thinks - # this is a NOOP return self.runInteraction( "set_destination_retry_timings", self._set_destination_retry_timings, -- cgit 1.5.1 From 642199570c57fd37b856c3808e08fe2923001b55 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 19 Sep 2018 17:28:18 +0100 Subject: Improve the logging when handling a federation transaction (#3904) Let's try to rationalise the logging that happens when we are processing an incoming transaction, to make it easier to figure out what is going wrong when they take ages. In particular: - make everything start with a [room_id event_id] prefix - make sure we log a warning when catching exceptions rather than just turning them into other, more cryptic, exceptions. 
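Concretely, the prefix convention from the list above looks like this in the handler (both lines are taken from the diff that follows):

    logger.info(
        "[%s %s] handling received PDU: %s",
        room_id, event_id, pdu,
    )
    ...
    logger.warn("[%s %s] Received event failed sanity checks", room_id, event_id)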
--- changelog.d/3904.misc | 1 + synapse/handlers/federation.py | 164 +++++++++++++++++++++++++++-------------- synapse/util/retryutils.py | 2 +- 3 files changed, 111 insertions(+), 56 deletions(-) create mode 100644 changelog.d/3904.misc (limited to 'synapse') diff --git a/changelog.d/3904.misc b/changelog.d/3904.misc new file mode 100644 index 0000000000..1e3c8e1706 --- /dev/null +++ b/changelog.d/3904.misc @@ -0,0 +1 @@ +Improve the logging when handling a federation transaction \ No newline at end of file diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index f10b46414b..8d6bd7976d 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -69,6 +69,27 @@ from ._base import BaseHandler logger = logging.getLogger(__name__) +def shortstr(iterable, maxitems=5): + """If iterable has maxitems or fewer, return the stringification of a list + containing those items. + + Otherwise, return the stringification of a a list with the first maxitems items, + followed by "...". + + Args: + iterable (Iterable): iterable to truncate + maxitems (int): number of items to return before truncating + + Returns: + unicode + """ + + items = list(itertools.islice(iterable, maxitems + 1)) + if len(items) <= maxitems: + return str(items) + return u"[" + u", ".join(repr(r) for r in items[:maxitems]) + u", ...]" + + class FederationHandler(BaseHandler): """Handles events that originated from federation. Responsible for: @@ -114,7 +135,6 @@ class FederationHandler(BaseHandler): self._room_pdu_linearizer = Linearizer("fed_room_pdu") @defer.inlineCallbacks - @log_function def on_receive_pdu( self, origin, pdu, get_missing=True, sent_to_us_directly=False, ): @@ -130,9 +150,17 @@ class FederationHandler(BaseHandler): Returns (Deferred): completes with None """ + room_id = pdu.room_id + event_id = pdu.event_id + + logger.info( + "[%s %s] handling received PDU: %s", + room_id, event_id, pdu, + ) + # We reprocess pdus when we have seen them only as outliers existing = yield self.store.get_event( - pdu.event_id, + event_id, allow_none=True, allow_rejected=True, ) @@ -147,7 +175,7 @@ class FederationHandler(BaseHandler): ) ) if already_seen: - logger.debug("Already seen pdu %s", pdu.event_id) + logger.debug("[%s %s]: Already seen pdu", room_id, event_id) return # do some initial sanity-checking of the event. In particular, make @@ -156,6 +184,7 @@ class FederationHandler(BaseHandler): try: self._sanity_check_event(pdu) except SynapseError as err: + logger.warn("[%s %s] Received event failed sanity checks", room_id, event_id) raise FederationError( "ERROR", err.code, @@ -165,10 +194,12 @@ class FederationHandler(BaseHandler): # If we are currently in the process of joining this room, then we # queue up events for later processing. - if pdu.room_id in self.room_queues: - logger.info("Ignoring PDU %s for room %s from %s for now; join " - "in progress", pdu.event_id, pdu.room_id, origin) - self.room_queues[pdu.room_id].append((pdu, origin)) + if room_id in self.room_queues: + logger.info( + "[%s %s] Queuing PDU from %s for now: join in progress", + room_id, event_id, origin, + ) + self.room_queues[room_id].append((pdu, origin)) return # If we're no longer in the room just ditch the event entirely. This @@ -179,7 +210,7 @@ class FederationHandler(BaseHandler): # we should check if we *are* in fact in the room. If we are then we # can magically rejoin the room. 
is_in_room = yield self.auth.check_host_in_room( - pdu.room_id, + room_id, self.server_name ) if not is_in_room: @@ -188,8 +219,8 @@ class FederationHandler(BaseHandler): ) if was_in_room: logger.info( - "Ignoring PDU %s for room %s from %s as we've left the room!", - pdu.event_id, pdu.room_id, origin, + "[%s %s] Ignoring PDU from %s as we've left the room", + room_id, event_id, origin, ) defer.returnValue(None) @@ -204,8 +235,8 @@ class FederationHandler(BaseHandler): ) logger.debug( - "_handle_new_pdu min_depth for %s: %d", - pdu.room_id, min_depth + "[%s %s] min_depth: %d", + room_id, event_id, min_depth, ) prevs = {e_id for e_id, _ in pdu.prev_events} @@ -218,17 +249,18 @@ class FederationHandler(BaseHandler): # send to the clients. pdu.internal_metadata.outlier = True elif min_depth and pdu.depth > min_depth: - if get_missing and prevs - seen: + missing_prevs = prevs - seen + if get_missing and missing_prevs: # If we're missing stuff, ensure we only fetch stuff one # at a time. logger.info( - "Acquiring lock for room %r to fetch %d missing events: %r...", - pdu.room_id, len(prevs - seen), list(prevs - seen)[:5], + "[%s %s] Acquiring room lock to fetch %d missing prev_events: %s", + room_id, event_id, len(missing_prevs), shortstr(missing_prevs), ) with (yield self._room_pdu_linearizer.queue(pdu.room_id)): logger.info( - "Acquired lock for room %r to fetch %d missing events", - pdu.room_id, len(prevs - seen), + "[%s %s] Acquired room lock to fetch %d missing prev_events", + room_id, event_id, len(missing_prevs), ) yield self._get_missing_events_for_pdu( @@ -241,19 +273,23 @@ class FederationHandler(BaseHandler): if not prevs - seen: logger.info( - "Found all missing prev events for %s", pdu.event_id + "[%s %s] Found all missing prev_events", + room_id, event_id, ) - elif prevs - seen: + elif missing_prevs: logger.info( - "Not fetching %d missing events for room %r,event %s: %r...", - len(prevs - seen), pdu.room_id, pdu.event_id, - list(prevs - seen)[:5], + "[%s %s] Not recursively fetching %d missing prev_events: %s", + room_id, event_id, len(missing_prevs), shortstr(missing_prevs), ) if sent_to_us_directly and prevs - seen: # If they have sent it to us directly, and the server # isn't telling us about the auth events that it's # made a message referencing, we explode + logger.warn( + "[%s %s] Failed to fetch %d prev events: rejecting", + room_id, event_id, len(prevs - seen), + ) raise FederationError( "ERROR", 403, @@ -270,15 +306,19 @@ class FederationHandler(BaseHandler): auth_chains = set() try: # Get the state of the events we know about - ours = yield self.store.get_state_groups(pdu.room_id, list(seen)) + ours = yield self.store.get_state_groups(room_id, list(seen)) state_groups.append(ours) # Ask the remote server for the states we don't # know about for p in prevs - seen: + logger.info( + "[%s %s] Requesting state at missing prev_event %s", + room_id, event_id, p, + ) state, got_auth_chain = ( yield self.federation_client.get_state_for_room( - origin, pdu.room_id, p + origin, room_id, p, ) ) auth_chains.update(got_auth_chain) @@ -291,19 +331,24 @@ class FederationHandler(BaseHandler): ev_ids, get_prev_content=False, check_redacted=False ) - room_version = yield self.store.get_room_version(pdu.room_id) + room_version = yield self.store.get_room_version(room_id) state_map = yield resolve_events_with_factory( - room_version, state_groups, {pdu.event_id: pdu}, fetch + room_version, state_groups, {event_id: pdu}, fetch ) state = (yield 
self.store.get_events(state_map.values())).values() auth_chain = list(auth_chains) except Exception: + logger.warn( + "[%s %s] Error attempting to resolve state at missing " + "prev_events", + room_id, event_id, exc_info=True, + ) raise FederationError( "ERROR", 403, "We can't get valid state history.", - affected=pdu.event_id, + affected=event_id, ) yield self._process_received_pdu( @@ -322,15 +367,16 @@ class FederationHandler(BaseHandler): prevs (set(str)): List of event ids which we are missing min_depth (int): Minimum depth of events to return. """ - # We recalculate seen, since it may have changed. + + room_id = pdu.room_id + event_id = pdu.event_id + seen = yield self.store.have_seen_events(prevs) if not prevs - seen: return - latest = yield self.store.get_latest_event_ids_in_room( - pdu.room_id - ) + latest = yield self.store.get_latest_event_ids_in_room(room_id) # We add the prev events that we have seen to the latest # list to ensure the remote server doesn't give them to us @@ -338,8 +384,8 @@ class FederationHandler(BaseHandler): latest |= seen logger.info( - "Missing %d events for room %r pdu %s: %r...", - len(prevs - seen), pdu.room_id, pdu.event_id, list(prevs - seen)[:5] + "[%s %s]: Requesting %d prev_events: %s", + room_id, event_id, len(prevs - seen), shortstr(prevs - seen) ) # XXX: we set timeout to 10s to help workaround @@ -392,7 +438,7 @@ class FederationHandler(BaseHandler): missing_events = yield self.federation_client.get_missing_events( origin, - pdu.room_id, + room_id, earliest_events_ids=list(latest), latest_events=[pdu], limit=10, @@ -401,37 +447,46 @@ class FederationHandler(BaseHandler): ) logger.info( - "Got %d events: %r...", - len(missing_events), [e.event_id for e in missing_events[:5]] + "[%s %s]: Got %d prev_events: %s", + room_id, event_id, len(missing_events), shortstr(missing_events), ) # We want to sort these by depth so we process them and # tell clients about them in order. missing_events.sort(key=lambda x: x.depth) - for e in missing_events: - logger.info("Handling found event %s", e.event_id) + for ev in missing_events: + logger.info( + "[%s %s] Handling received prev_event %s", + room_id, event_id, ev.event_id, + ) try: yield self.on_receive_pdu( origin, - e, + ev, get_missing=False ) except FederationError as e: if e.code == 403: - logger.warn("Event %s failed history check.") + logger.warn( + "[%s %s] Received prev_event %s failed history check.", + room_id, event_id, ev.event_id, + ) else: raise - @log_function @defer.inlineCallbacks - def _process_received_pdu(self, origin, pdu, state, auth_chain): + def _process_received_pdu(self, origin, event, state, auth_chain): """ Called when we have a new pdu. We need to do auth checks and put it through the StateHandler. """ - event = pdu + room_id = event.room_id + event_id = event.event_id - logger.debug("Processing event: %s", event) + logger.debug( + "[%s %s] Processing event: %s", + room_id, event_id, event, + ) # FIXME (erikj): Awful hack to make the case where we are not currently # in the room work @@ -440,15 +495,16 @@ class FederationHandler(BaseHandler): # event. 
if state and auth_chain and not event.internal_metadata.is_outlier(): is_in_room = yield self.auth.check_host_in_room( - event.room_id, + room_id, self.server_name ) else: is_in_room = True + if not is_in_room: logger.info( - "Got event for room we're not in: %r %r", - event.room_id, event.event_id + "[%s %s] Got event for room we're not in", + room_id, event_id, ) try: @@ -460,7 +516,7 @@ class FederationHandler(BaseHandler): "ERROR", e.code, e.msg, - affected=event.event_id, + affected=event_id, ) else: @@ -509,12 +565,12 @@ class FederationHandler(BaseHandler): affected=event.event_id, ) - room = yield self.store.get_room(event.room_id) + room = yield self.store.get_room(room_id) if not room: try: yield self.store.store_room( - room_id=event.room_id, + room_id=room_id, room_creator_user_id="", is_public=False, ) @@ -542,7 +598,7 @@ class FederationHandler(BaseHandler): if newly_joined: user = UserID.from_string(event.state_key) - yield self.user_joined_room(user, event.room_id) + yield self.user_joined_room(user, room_id) @log_function @defer.inlineCallbacks @@ -1459,12 +1515,10 @@ class FederationHandler(BaseHandler): else: defer.returnValue(None) - @log_function def get_min_depth_for_context(self, context): return self.store.get_min_depth(context) @defer.inlineCallbacks - @log_function def _handle_new_event(self, origin, event, state=None, auth_events=None, backfilled=False): context = yield self._prep_event( @@ -1664,8 +1718,8 @@ class FederationHandler(BaseHandler): ) except AuthError as e: logger.warn( - "Rejecting %s because %s", - event.event_id, e.msg + "[%s %s] Rejecting: %s", + event.room_id, event.event_id, e.msg ) context.rejected = RejectedReason.AUTH_ERROR diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 8a3a06fd74..26cce7d197 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -188,7 +188,7 @@ class RetryDestinationLimiter(object): else: self.retry_interval = self.min_retry_interval - logger.debug( + logger.info( "Connection to %s was unsuccessful (%s(%s)); backoff now %i", self.destination, exc_type, exc_val, self.retry_interval ) -- cgit 1.5.1 From aeca5a5ed5816a7797a0807142a41e14baac80f6 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 20 Sep 2018 16:28:18 +1000 Subject: Add a regression test for logging on failed connections (#3912) --- changelog.d/3912.misc | 1 + synapse/http/site.py | 4 +-- tests/test_server.py | 74 +++++++++++++++++++++++++++++++++++++++++++++++++-- tests/unittest.py | 3 ++- 4 files changed, 77 insertions(+), 5 deletions(-) create mode 100644 changelog.d/3912.misc (limited to 'synapse') diff --git a/changelog.d/3912.misc b/changelog.d/3912.misc new file mode 100644 index 0000000000..87d73697ea --- /dev/null +++ b/changelog.d/3912.misc @@ -0,0 +1 @@ +Add a regression test for logging failed HTTP requests on Python 3. 
\ No newline at end of file diff --git a/synapse/http/site.py b/synapse/http/site.py index 9579e8cd0d..d41d672934 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -75,9 +75,9 @@ class SynapseRequest(Request): return '<%s at 0x%x method=%r uri=%r clientproto=%r site=%r>' % ( self.__class__.__name__, id(self), - self.method, + self.method.decode('ascii', errors='replace'), self.get_redacted_uri(), - self.clientproto, + self.clientproto.decode('ascii', errors='replace'), self.site.site_tag, ) diff --git a/tests/test_server.py b/tests/test_server.py index ef74544e93..4045fdadc3 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -1,14 +1,35 @@ +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging import re +from six import StringIO + from twisted.internet.defer import Deferred -from twisted.test.proto_helpers import MemoryReactorClock +from twisted.python.failure import Failure +from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock +from twisted.web.resource import Resource +from twisted.web.server import NOT_DONE_YET from synapse.api.errors import Codes, SynapseError from synapse.http.server import JsonResource +from synapse.http.site import SynapseSite, logger from synapse.util import Clock from tests import unittest -from tests.server import make_request, render, setup_test_homeserver +from tests.server import FakeTransport, make_request, render, setup_test_homeserver class JsonResourceTests(unittest.TestCase): @@ -121,3 +142,52 @@ class JsonResourceTests(unittest.TestCase): self.assertEqual(channel.result["code"], b'400') self.assertEqual(channel.json_body["error"], "Unrecognized request") self.assertEqual(channel.json_body["errcode"], "M_UNRECOGNIZED") + + +class SiteTestCase(unittest.HomeserverTestCase): + def test_lose_connection(self): + """ + We log the URI correctly redacted when we lose the connection. + """ + + class HangingResource(Resource): + """ + A Resource that strategically hangs, as if it were processing an + answer. 
+ """ + + def render(self, request): + return NOT_DONE_YET + + # Set up a logging handler that we can inspect afterwards + output = StringIO() + handler = logging.StreamHandler(output) + logger.addHandler(handler) + old_level = logger.level + logger.setLevel(10) + self.addCleanup(logger.setLevel, old_level) + self.addCleanup(logger.removeHandler, handler) + + # Make a resource and a Site, the resource will hang and allow us to + # time out the request while it's 'processing' + base_resource = Resource() + base_resource.putChild(b'', HangingResource()) + site = SynapseSite("test", "site_tag", {}, base_resource, "1.0") + + server = site.buildProtocol(None) + client = AccumulatingProtocol() + client.makeConnection(FakeTransport(server, self.reactor)) + server.makeConnection(FakeTransport(client, self.reactor)) + + # Send a request with an access token that will get redacted + server.dataReceived(b"GET /?access_token=bar HTTP/1.0\r\n\r\n") + self.pump() + + # Lose the connection + e = Failure(Exception("Failed123")) + server.connectionLost(e) + handler.flush() + + # Our access token is redacted and the failure reason is logged. + self.assertIn("/?access_token=", output.getvalue()) + self.assertIn("Failed123", output.getvalue()) diff --git a/tests/unittest.py b/tests/unittest.py index a3d39920db..56f3dca394 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -219,7 +219,8 @@ class HomeserverTestCase(TestCase): Function to be overridden in subclasses. """ - raise NotImplementedError() + hs = self.setup_test_homeserver() + return hs def prepare(self, reactor, clock, homeserver): """ -- cgit 1.5.1 From 1f3f5fcf52efdf5215dc6c7cd3805b1357bf8236 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 20 Sep 2018 20:14:34 +1000 Subject: Fix client IPs being broken on Python 3 (#3908) --- .travis.yml | 5 + changelog.d/3908.bugfix | 1 + synapse/http/site.py | 2 +- synapse/storage/client_ips.py | 34 ++++--- tests/server.py | 8 +- tests/storage/test_client_ips.py | 202 ++++++++++++++++++++++++++++++++------- tests/unittest.py | 7 +- tests/utils.py | 27 +++++- tox.ini | 10 ++ 9 files changed, 238 insertions(+), 58 deletions(-) create mode 100644 changelog.d/3908.bugfix (limited to 'synapse') diff --git a/.travis.yml b/.travis.yml index b3ee66da8f..b6faca4b92 100644 --- a/.travis.yml +++ b/.travis.yml @@ -31,6 +31,11 @@ matrix: - python: 3.6 env: TOX_ENV=py36 + - python: 3.6 + env: TOX_ENV=py36-postgres TRIAL_FLAGS="-j 4" + services: + - postgresql + - python: 3.6 env: TOX_ENV=check_isort diff --git a/changelog.d/3908.bugfix b/changelog.d/3908.bugfix new file mode 100644 index 0000000000..518aee6c4d --- /dev/null +++ b/changelog.d/3908.bugfix @@ -0,0 +1 @@ +Fix adding client IPs to the database failing on Python 3. \ No newline at end of file diff --git a/synapse/http/site.py b/synapse/http/site.py index d41d672934..50be2de3bb 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -308,7 +308,7 @@ class XForwardedForRequest(SynapseRequest): C{b"-"}. 
""" return self.requestHeaders.getRawHeaders( - b"x-forwarded-for", [b"-"])[0].split(b",")[0].strip() + b"x-forwarded-for", [b"-"])[0].split(b",")[0].strip().decode('ascii') class SynapseRequestFactory(object): diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py index 8fc678fa67..9ad17b7c25 100644 --- a/synapse/storage/client_ips.py +++ b/synapse/storage/client_ips.py @@ -119,21 +119,25 @@ class ClientIpStore(background_updates.BackgroundUpdateStore): for entry in iteritems(to_update): (user_id, access_token, ip), (user_agent, device_id, last_seen) = entry - self._simple_upsert_txn( - txn, - table="user_ips", - keyvalues={ - "user_id": user_id, - "access_token": access_token, - "ip": ip, - "user_agent": user_agent, - "device_id": device_id, - }, - values={ - "last_seen": last_seen, - }, - lock=False, - ) + try: + self._simple_upsert_txn( + txn, + table="user_ips", + keyvalues={ + "user_id": user_id, + "access_token": access_token, + "ip": ip, + "user_agent": user_agent, + "device_id": device_id, + }, + values={ + "last_seen": last_seen, + }, + lock=False, + ) + except Exception as e: + # Failed to upsert, log and continue + logger.error("Failed to insert client IP %r: %r", entry, e) @defer.inlineCallbacks def get_last_client_ip_by_device(self, user_id, device_id): diff --git a/tests/server.py b/tests/server.py index ccea3baa55..7bee58dff1 100644 --- a/tests/server.py +++ b/tests/server.py @@ -98,7 +98,7 @@ class FakeSite: return FakeLogger() -def make_request(method, path, content=b"", access_token=None): +def make_request(method, path, content=b"", access_token=None, request=SynapseRequest): """ Make a web request using the given method and path, feed it the content, and return the Request and the Channel underneath. @@ -120,14 +120,16 @@ def make_request(method, path, content=b"", access_token=None): site = FakeSite() channel = FakeChannel() - req = SynapseRequest(site, channel) + req = request(site, channel) req.process = lambda: b"" req.content = BytesIO(content) if access_token: req.requestHeaders.addRawHeader(b"Authorization", b"Bearer " + access_token) - req.requestHeaders.addRawHeader(b"X-Forwarded-For", b"127.0.0.1") + if content: + req.requestHeaders.addRawHeader(b"Content-Type", b"application/json") + req.requestReceived(method, path, b"1.1") return req, channel diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index c9b02a062b..2ffbb9f14f 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,35 +13,45 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ +import hashlib +import hmac +import json + from mock import Mock from twisted.internet import defer -import tests.unittest -import tests.utils +from synapse.http.site import XForwardedForRequest +from synapse.rest.client.v1 import admin, login + +from tests import unittest -class ClientIpStoreTestCase(tests.unittest.TestCase): - def __init__(self, *args, **kwargs): - super(ClientIpStoreTestCase, self).__init__(*args, **kwargs) - self.store = None # type: synapse.storage.DataStore - self.clock = None # type: tests.utils.MockClock +class ClientIpStoreTestCase(unittest.HomeserverTestCase): + def make_homeserver(self, reactor, clock): + hs = self.setup_test_homeserver() + return hs - @defer.inlineCallbacks - def setUp(self): - self.hs = yield tests.utils.setup_test_homeserver(self.addCleanup) + def prepare(self, hs, reactor, clock): self.store = self.hs.get_datastore() - self.clock = self.hs.get_clock() - @defer.inlineCallbacks def test_insert_new_client_ip(self): - self.clock.now = 12345678 + self.reactor.advance(12345678) + user_id = "@user:id" - yield self.store.insert_client_ip( - user_id, "access_token", "ip", "user_agent", "device_id" + self.get_success( + self.store.insert_client_ip( + user_id, "access_token", "ip", "user_agent", "device_id" + ) ) - result = yield self.store.get_last_client_ip_by_device(user_id, "device_id") + # Trigger the storage loop + self.reactor.advance(10) + + result = self.get_success( + self.store.get_last_client_ip_by_device(user_id, "device_id") + ) r = result[(user_id, "device_id")] self.assertDictContainsSubset( @@ -55,18 +66,18 @@ class ClientIpStoreTestCase(tests.unittest.TestCase): r, ) - @defer.inlineCallbacks def test_disabled_monthly_active_user(self): self.hs.config.limit_usage_by_mau = False self.hs.config.max_mau_value = 50 user_id = "@user:server" - yield self.store.insert_client_ip( - user_id, "access_token", "ip", "user_agent", "device_id" + self.get_success( + self.store.insert_client_ip( + user_id, "access_token", "ip", "user_agent", "device_id" + ) ) - active = yield self.store.user_last_seen_monthly_active(user_id) + active = self.get_success(self.store.user_last_seen_monthly_active(user_id)) self.assertFalse(active) - @defer.inlineCallbacks def test_adding_monthly_active_user_when_full(self): self.hs.config.limit_usage_by_mau = True self.hs.config.max_mau_value = 50 @@ -76,38 +87,159 @@ class ClientIpStoreTestCase(tests.unittest.TestCase): self.store.get_monthly_active_count = Mock( return_value=defer.succeed(lots_of_users) ) - yield self.store.insert_client_ip( - user_id, "access_token", "ip", "user_agent", "device_id" + self.get_success( + self.store.insert_client_ip( + user_id, "access_token", "ip", "user_agent", "device_id" + ) ) - active = yield self.store.user_last_seen_monthly_active(user_id) + active = self.get_success(self.store.user_last_seen_monthly_active(user_id)) self.assertFalse(active) - @defer.inlineCallbacks def test_adding_monthly_active_user_when_space(self): self.hs.config.limit_usage_by_mau = True self.hs.config.max_mau_value = 50 user_id = "@user:server" - active = yield self.store.user_last_seen_monthly_active(user_id) + active = self.get_success(self.store.user_last_seen_monthly_active(user_id)) self.assertFalse(active) - yield self.store.insert_client_ip( - user_id, "access_token", "ip", "user_agent", "device_id" + # Trigger the saving loop + self.reactor.advance(10) + + self.get_success( + self.store.insert_client_ip( + user_id, "access_token", "ip", "user_agent", "device_id" + ) ) - active = yield 
self.store.user_last_seen_monthly_active(user_id) + active = self.get_success(self.store.user_last_seen_monthly_active(user_id)) self.assertTrue(active) - @defer.inlineCallbacks def test_updating_monthly_active_user_when_space(self): self.hs.config.limit_usage_by_mau = True self.hs.config.max_mau_value = 50 user_id = "@user:server" - yield self.store.register(user_id=user_id, token="123", password_hash=None) + self.get_success( + self.store.register(user_id=user_id, token="123", password_hash=None) + ) - active = yield self.store.user_last_seen_monthly_active(user_id) + active = self.get_success(self.store.user_last_seen_monthly_active(user_id)) self.assertFalse(active) - yield self.store.insert_client_ip( - user_id, "access_token", "ip", "user_agent", "device_id" + # Trigger the saving loop + self.reactor.advance(10) + + self.get_success( + self.store.insert_client_ip( + user_id, "access_token", "ip", "user_agent", "device_id" + ) ) - active = yield self.store.user_last_seen_monthly_active(user_id) + active = self.get_success(self.store.user_last_seen_monthly_active(user_id)) self.assertTrue(active) + + +class ClientIpAuthTestCase(unittest.HomeserverTestCase): + + servlets = [admin.register_servlets, login.register_servlets] + + def make_homeserver(self, reactor, clock): + hs = self.setup_test_homeserver() + return hs + + def prepare(self, hs, reactor, clock): + self.hs.config.registration_shared_secret = u"shared" + self.store = self.hs.get_datastore() + + # Create the user + request, channel = self.make_request("GET", "/_matrix/client/r0/admin/register") + self.render(request) + nonce = channel.json_body["nonce"] + + want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) + want_mac.update(nonce.encode('ascii') + b"\x00bob\x00abc123\x00admin") + want_mac = want_mac.hexdigest() + + body = json.dumps( + { + "nonce": nonce, + "username": "bob", + "password": "abc123", + "admin": True, + "mac": want_mac, + } + ) + request, channel = self.make_request( + "POST", "/_matrix/client/r0/admin/register", body.encode('utf8') + ) + self.render(request) + + self.assertEqual(channel.code, 200) + self.user_id = channel.json_body["user_id"] + + def test_request_with_xforwarded(self): + """ + The IP in X-Forwarded-For is entered into the client IPs table. + """ + self._runtest( + {b"X-Forwarded-For": b"127.9.0.1"}, + "127.9.0.1", + {"request": XForwardedForRequest}, + ) + + def test_request_from_getPeer(self): + """ + The IP returned by getPeer is entered into the client IPs table, if + there's no X-Forwarded-For header. 
+ """ + self._runtest({}, "127.0.0.1", {}) + + def _runtest(self, headers, expected_ip, make_request_args): + device_id = "bleb" + + body = json.dumps( + { + "type": "m.login.password", + "user": "bob", + "password": "abc123", + "device_id": device_id, + } + ) + request, channel = self.make_request( + "POST", "/_matrix/client/r0/login", body.encode('utf8'), **make_request_args + ) + self.render(request) + self.assertEqual(channel.code, 200) + access_token = channel.json_body["access_token"].encode('ascii') + + # Advance to a known time + self.reactor.advance(123456 - self.reactor.seconds()) + + request, channel = self.make_request( + "GET", + "/_matrix/client/r0/admin/users/" + self.user_id, + body.encode('utf8'), + access_token=access_token, + **make_request_args + ) + request.requestHeaders.addRawHeader(b"User-Agent", b"Mozzila pizza") + + # Add the optional headers + for h, v in headers.items(): + request.requestHeaders.addRawHeader(h, v) + self.render(request) + + # Advance so the save loop occurs + self.reactor.advance(100) + + result = self.get_success( + self.store.get_last_client_ip_by_device(self.user_id, device_id) + ) + r = result[(self.user_id, device_id)] + self.assertDictContainsSubset( + { + "user_id": self.user_id, + "device_id": device_id, + "ip": expected_ip, + "user_agent": "Mozzila pizza", + "last_seen": 123456100, + }, + r, + ) diff --git a/tests/unittest.py b/tests/unittest.py index 56f3dca394..ef905e6389 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -26,6 +26,7 @@ from twisted.internet.defer import Deferred from twisted.trial import unittest from synapse.http.server import JsonResource +from synapse.http.site import SynapseRequest from synapse.server import HomeServer from synapse.types import UserID, create_requester from synapse.util.logcontext import LoggingContextFilter @@ -237,7 +238,9 @@ class HomeserverTestCase(TestCase): Function to optionally be overridden in subclasses. """ - def make_request(self, method, path, content=b""): + def make_request( + self, method, path, content=b"", access_token=None, request=SynapseRequest + ): """ Create a SynapseRequest at the path using the method and containing the given content. @@ -255,7 +258,7 @@ class HomeserverTestCase(TestCase): if isinstance(content, dict): content = json.dumps(content).encode('utf8') - return make_request(method, path, content) + return make_request(method, path, content, access_token, request) def render(self, request): """ diff --git a/tests/utils.py b/tests/utils.py index 215226debf..aaed1149c3 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -16,7 +16,9 @@ import atexit import hashlib import os +import time import uuid +import warnings from inspect import getcallargs from mock import Mock, patch @@ -237,20 +239,41 @@ def setup_test_homeserver( else: # We need to do cleanup on PostgreSQL def cleanup(): + import psycopg2 + # Close all the db pools hs.get_db_pool().close() + dropped = False + # Drop the test database db_conn = db_engine.module.connect( database=POSTGRES_BASE_DB, user=POSTGRES_USER ) db_conn.autocommit = True cur = db_conn.cursor() - cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,)) - db_conn.commit() + + # Try a few times to drop the DB. Some things may hold on to the + # database for a few more seconds due to flakiness, preventing + # us from dropping it when the test is over. If we can't drop + # it, warn and move on. 
+ for x in range(5): + try: + cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,)) + db_conn.commit() + dropped = True + except psycopg2.OperationalError as e: + warnings.warn( + "Couldn't drop old db: " + str(e), category=UserWarning + ) + time.sleep(0.5) + cur.close() db_conn.close() + if not dropped: + warnings.warn("Failed to drop old DB.", category=UserWarning) + if not LEAVE_DB: # Register the cleanup hook cleanup_func(cleanup) diff --git a/tox.ini b/tox.ini index 88875c3318..e4db563b4b 100644 --- a/tox.ini +++ b/tox.ini @@ -70,6 +70,16 @@ usedevelop=true [testenv:py36] usedevelop=true +[testenv:py36-postgres] +usedevelop=true +deps = + {[base]deps} + psycopg2 +setenv = + {[base]setenv} + SYNAPSE_POSTGRES = 1 + + [testenv:packaging] deps = check-manifest -- cgit 1.5.1 From 703de4ec13b95f0c8f2e04bf8c20b3906b010592 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 20 Sep 2018 13:06:55 +0100 Subject: Comments and interface cleanup for on_receive_pdu Add some informative comments about what's going on here. Also, `sent_to_us_directly` and `get_missing` were doing the same thing (apart from in `_handle_queued_pdus`, which looks like a bug), so let's get rid of `get_missing` and use `sent_to_us_directly` consistently. --- synapse/federation/federation_server.py | 2 +- synapse/handlers/federation.py | 69 ++++++++++++++++++++++----------- 2 files changed, 47 insertions(+), 24 deletions(-) (limited to 'synapse') diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index dbee404ea7..9a571e4fc7 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -618,7 +618,7 @@ class FederationServer(FederationBase): ) yield self.handler.on_receive_pdu( - origin, pdu, get_missing=True, sent_to_us_directly=True, + origin, pdu, sent_to_us_directly=True, ) def __str__(self): diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 8d6bd7976d..2ccdc3bfa7 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -136,7 +136,7 @@ class FederationHandler(BaseHandler): @defer.inlineCallbacks def on_receive_pdu( - self, origin, pdu, get_missing=True, sent_to_us_directly=False, + self, origin, pdu, sent_to_us_directly=False, ): """ Process a PDU received via a federation /send/ transaction, or via backfill of missing prev_events @@ -145,7 +145,8 @@ class FederationHandler(BaseHandler): origin (str): server which initiated the /send/ transaction. Will be used to fetch missing events or state. pdu (FrozenEvent): received PDU - get_missing (bool): True if we should fetch missing prev_events + sent_to_us_directly (bool): True if this event was pushed to us; False if + we pulled it as the result of a missing prev_event. Returns (Deferred): completes with None """ @@ -250,7 +251,7 @@ class FederationHandler(BaseHandler): pdu.internal_metadata.outlier = True elif min_depth and pdu.depth > min_depth: missing_prevs = prevs - seen - if get_missing and missing_prevs: + if sent_to_us_directly and missing_prevs: # If we're missing stuff, ensure we only fetch stuff one # at a time. 
logger.info( @@ -282,24 +283,46 @@ - if sent_to_us_directly and prevs - seen: - # If they have sent it to us directly, and the server - # isn't telling us about the auth events that it's - # made a message referencing, we explode - logger.warn( - "[%s %s] Failed to fetch %d prev events: rejecting", - room_id, event_id, len(prevs - seen), - ) - raise FederationError( - "ERROR", - 403, - ( - "Your server isn't divulging details about prev_events " - "referenced in this event." - ), - affected=pdu.event_id, - ) - elif prevs - seen: + if prevs - seen: + # We've still not been able to get all of the prev_events for this event. + # + # In this case, we need to fall back to asking another server in the + # federation for the state at this event. That's ok provided we then + # resolve the state against other bits of the DAG before using it (which + # will ensure that you can't just take over a room by sending an event, + # withholding its prev_events, and declaring yourself to be an admin in + # the subsequent state request). + # + # Now, if we're pulling this event as a missing prev_event, then clearly + # this event is not going to become the only forward-extremity and we are + # guaranteed to resolve its state against our existing forward + # extremities, so that should be fine. + # + # On the other hand, if this event was pushed to us, it is possible for + # it to become the only forward-extremity in the room, and we would then + # trust its state to be the state for the whole room. This is very bad. + # Further, if the event was pushed to us, there is no excuse for us not to + # have all the prev_events. We therefore reject any such events. + # + # XXX this really feels like it could/should be merged with the above, + # but there is an interaction with min_depth that I'm not really + # following. + + if sent_to_us_directly: + logger.warn( + "[%s %s] Failed to fetch %d prev events: rejecting", + room_id, event_id, len(prevs - seen), + ) + raise FederationError( + "ERROR", + 403, + ( + "Your server isn't divulging details about prev_events " + "referenced in this event." + ), + affected=pdu.event_id, + ) + # Calculate the state of the previous events, and # de-conflict them to find the current state. state_groups = [] @@ -464,7 +487,7 @@ yield self.on_receive_pdu( origin, ev, - get_missing=False + sent_to_us_directly=False, ) except FederationError as e: if e.code == 403: @@ -1112,7 +1135,7 @@ try: logger.info("Processing queued PDU %s which was received " "while we were joining %s", p.event_id, p.room_id) - yield self.on_receive_pdu(origin, p) + yield self.on_receive_pdu(origin, p, sent_to_us_directly=True) except Exception as e: logger.warn( "Error handling queued PDU %s from %s: %s", -- cgit 1.5.1 From b28a7ed5030148139af6c6c03e635b068a6eae15 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 20 Sep 2018 13:44:20 +0100 Subject: Fix spurious exceptions when client closes connection If an HTTP handler throws an exception while processing a request, we automatically write a JSON error response. If the handler had already started writing a response, Twisted throws an exception. We should check for this case and simply abort the connection if there was an error after the response had started being written.
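(Illustrative aside: stripped of context, the guard this commit adds follows the pattern sketched below — assuming a twisted.web Request and Synapse's own respond_with_json helper; the function name here is made up.)

    def return_json_error(request, code, error_dict):
        if request.startedWriting:
            # Part of a response is already on the wire, so a JSON error
            # can no longer be sent; the only safe option is to drop the
            # connection.
            if request.transport:
                try:
                    request.transport.abortConnection()
                except Exception:
                    # abortConnection throws if the connection is
                    # already closed
                    pass
        else:
            respond_with_json(request, code, error_dict, send_cors=True)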
--- synapse/http/server.py | 49 +++++++++++++++++++++++++++++++++++-------------- 1 file changed, 35 insertions(+), 14 deletions(-) (limited to 'synapse') diff --git a/synapse/http/server.py b/synapse/http/server.py index 2d5c23e673..b4b25cab19 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -84,10 +84,21 @@ def wrap_json_request_handler(h): logger.info( "%s SynapseError: %s - %s", request, code, e.msg ) - respond_with_json( - request, code, e.error_dict(), send_cors=True, - pretty_print=_request_user_agent_is_curl(request), - ) + + # Only respond with an error response if we haven't already started + # writing, otherwise lets just kill the connection + if request.startedWriting: + if request.transport: + try: + request.transport.abortConnection() + except Exception: + # abortConnection throws if the connection is already closed + pass + else: + respond_with_json( + request, code, e.error_dict(), send_cors=True, + pretty_print=_request_user_agent_is_curl(request), + ) except Exception: # failure.Failure() fishes the original Failure out @@ -100,16 +111,26 @@ def wrap_json_request_handler(h): request, f.getTraceback().rstrip(), ) - respond_with_json( - request, - 500, - { - "error": "Internal server error", - "errcode": Codes.UNKNOWN, - }, - send_cors=True, - pretty_print=_request_user_agent_is_curl(request), - ) + # Only respond with an error response if we haven't already started + # writing, otherwise lets just kill the connection + if request.startedWriting: + if request.transport: + try: + request.transport.abortConnection() + except Exception: + # abortConnection throws if the connection is already closed + pass + else: + respond_with_json( + request, + 500, + { + "error": "Internal server error", + "errcode": Codes.UNKNOWN, + }, + send_cors=True, + pretty_print=_request_user_agent_is_curl(request), + ) return wrap_async_request_handler(wrapped_request_handler) -- cgit 1.5.1 From 9ea408441f527064aeafb5a7076a10a15f2c580c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 20 Sep 2018 16:15:21 +0100 Subject: Handle exceptions thrown by background tasks Fixes #3921 --- synapse/metrics/background_process_metrics.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'synapse') diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 167167be0a..e5293587e3 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -191,6 +191,8 @@ def run_as_background_process(desc, func, *args, **kwargs): try: yield func(*args, **kwargs) + except Exception: + logger.exception("Background process '%s' threw an exception", desc) finally: proc.update_metrics() -- cgit 1.5.1 From 94ae1dea3cf746ba5f26b1ab57f945cb2e734ba1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 20 Sep 2018 17:05:34 +0100 Subject: Add missing logger --- synapse/metrics/background_process_metrics.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'synapse') diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index e5293587e3..173908299c 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import logging import threading import six @@ -23,6 +24,9 @@ from twisted.internet import defer from synapse.util.logcontext import LoggingContext, PreserveLoggingContext +logger = logging.getLogger(__name__) + + _background_process_start_count = Counter( "synapse_background_process_start_count", "Number of background processes started", -- cgit 1.5.1 From a2ddaa90f21074a659abde45b712ab39324e97e4 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Thu, 20 Sep 2018 21:21:54 +0100 Subject: Always LL ourselves if we're in a room to simplify clients (#3916) Should fix https://github.com/vector-im/riot-web/issues/7209 --- changelog.d/3916.feature | 1 + synapse/handlers/sync.py | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 changelog.d/3916.feature (limited to 'synapse') diff --git a/changelog.d/3916.feature b/changelog.d/3916.feature new file mode 100644 index 0000000000..13282d992b --- /dev/null +++ b/changelog.d/3916.feature @@ -0,0 +1 @@ +Always LL ourselves if we're in a room diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 9bca4e7067..b598916b21 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -713,6 +713,10 @@ class SyncHandler(object): ) ] + # always make sure we LL ourselves so we know we're in the room + # (if we are), to fix https://github.com/vector-im/riot-web/issues/7209 + types.append((EventTypes.Member, sync_config.user.to_string())) + # only apply the filtering to room members filtered_types = [EventTypes.Member] -- cgit 1.5.1 From 8601c24287c452c0d9a803b130e0f68cf6169f6a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 21 Sep 2018 14:19:46 +0100 Subject: Fix some instances of ExpiringCache not expiring cache items ExpiringCache required that `start()` be called before it would actually start expiring entries. A number of places didn't do that. This PR removes `start` from ExpiringCache, and automatically starts the background reaping process on creation instead.
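(Illustrative aside: for callers the change simply removes the extra call. A usage sketch based on the federation-client hunk below — `clock` is assumed to be a synapse Clock already in scope.)

    cache = ExpiringCache(
        cache_name="get_pdu_cache",
        clock=clock,
        max_len=1000,
        expiry_ms=120 * 1000,  # entries expire after two minutes
    )
    # No cache.start() is needed any more: the constructor schedules the
    # background reaping loop itself (unless expiry_ms is 0, in which
    # case entries never expire and no loop is started).
    cache["some-key"] = "some-value"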
--- synapse/app/appservice.py | 1 - synapse/app/client_reader.py | 1 - synapse/app/event_creator.py | 1 - synapse/app/federation_reader.py | 1 - synapse/app/federation_sender.py | 1 - synapse/app/frontend_proxy.py | 1 - synapse/app/homeserver.py | 1 - synapse/app/media_repository.py | 1 - synapse/app/pusher.py | 1 - synapse/app/synchrotron.py | 1 - synapse/app/user_dir.py | 1 - synapse/federation/federation_client.py | 28 ++++++++++++--------------- synapse/rest/media/v1/preview_url_resource.py | 1 - synapse/state/__init__.py | 9 --------- synapse/util/caches/expiringcache.py | 1 - tests/util/test_expiring_cache.py | 1 - 16 files changed, 12 insertions(+), 39 deletions(-) (limited to 'synapse') diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index 86b5067400..02039f7e79 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -172,7 +172,6 @@ def start(config_options): def start(): ps.get_datastore().start_profiling() - ps.get_state_handler().start_caching() reactor.callWhenRunning(start) diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index ce2b113dbb..4c73c637bb 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -181,7 +181,6 @@ def start(config_options): ss.start_listening(config.worker_listeners) def start(): - ss.get_state_handler().start_caching() ss.get_datastore().start_profiling() reactor.callWhenRunning(start) diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index f98e456ea0..bc82197b2a 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -199,7 +199,6 @@ def start(config_options): ss.start_listening(config.worker_listeners) def start(): - ss.get_state_handler().start_caching() ss.get_datastore().start_profiling() reactor.callWhenRunning(start) diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 60f5973505..18ca71ef99 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -168,7 +168,6 @@ def start(config_options): ss.start_listening(config.worker_listeners) def start(): - ss.get_state_handler().start_caching() ss.get_datastore().start_profiling() reactor.callWhenRunning(start) diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index 60dd09aac3..6501c57792 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -201,7 +201,6 @@ def start(config_options): def start(): ps.get_datastore().start_profiling() - ps.get_state_handler().start_caching() reactor.callWhenRunning(start) _base.start_worker_reactor("synapse-federation-sender", config) diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 8c0b9c67b0..b076fbe522 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -258,7 +258,6 @@ def start(config_options): ss.start_listening(config.worker_listeners) def start(): - ss.get_state_handler().start_caching() ss.get_datastore().start_profiling() reactor.callWhenRunning(start) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 3241ded188..8c5d858b0b 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -384,7 +384,6 @@ def setup(config_options): def start(): hs.get_pusherpool().start() - hs.get_state_handler().start_caching() hs.get_datastore().start_profiling() hs.get_datastore().start_doing_background_updates() hs.get_federation_client().start_get_pdu_cache() diff --git a/synapse/app/media_repository.py 
b/synapse/app/media_repository.py index e3dbb3b4e6..992d182dba 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -168,7 +168,6 @@ def start(config_options): ss.start_listening(config.worker_listeners) def start(): - ss.get_state_handler().start_caching() ss.get_datastore().start_profiling() reactor.callWhenRunning(start) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 244c604de9..2ec4c7defb 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -228,7 +228,6 @@ def start(config_options): def start(): ps.get_pusherpool().start() ps.get_datastore().start_profiling() - ps.get_state_handler().start_caching() reactor.callWhenRunning(start) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 6662340797..df81b7bcbe 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -435,7 +435,6 @@ def start(config_options): def start(): ss.get_datastore().start_profiling() - ss.get_state_handler().start_caching() reactor.callWhenRunning(start) diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index 96ffcaf073..b383e79c1c 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -229,7 +229,6 @@ def start(config_options): def start(): ps.get_datastore().start_profiling() - ps.get_state_handler().start_caching() reactor.callWhenRunning(start) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index fe67b2ff42..5a92428f56 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -66,6 +66,14 @@ class FederationClient(FederationBase): self.state = hs.get_state_handler() self.transport_layer = hs.get_federation_transport_client() + self._get_pdu_cache = ExpiringCache( + cache_name="get_pdu_cache", + clock=self._clock, + max_len=1000, + expiry_ms=120 * 1000, + reset_expiry_on_get=False, + ) + def _clear_tried_cache(self): """Clear pdu_destination_tried cache""" now = self._clock.time_msec() @@ -82,17 +90,6 @@ class FederationClient(FederationBase): if destination_dict: self.pdu_destination_tried[event_id] = destination_dict - def start_get_pdu_cache(self): - self._get_pdu_cache = ExpiringCache( - cache_name="get_pdu_cache", - clock=self._clock, - max_len=1000, - expiry_ms=120 * 1000, - reset_expiry_on_get=False, - ) - - self._get_pdu_cache.start() - @log_function def make_query(self, destination, query_type, args, retry_on_dns_fail=False, ignore_backoff=False): @@ -229,10 +226,9 @@ class FederationClient(FederationBase): # TODO: Rate limit the number of times we try and get the same event. 
- if self._get_pdu_cache: - ev = self._get_pdu_cache.get(event_id) - if ev: - defer.returnValue(ev) + ev = self._get_pdu_cache.get(event_id) + if ev: + defer.returnValue(ev) pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {}) @@ -285,7 +281,7 @@ class FederationClient(FederationBase): ) continue - if self._get_pdu_cache is not None and signed_pdu: + if signed_pdu: self._get_pdu_cache[event_id] = signed_pdu defer.returnValue(signed_pdu) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index cad2dec33a..af01040a38 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -79,7 +79,6 @@ class PreviewUrlResource(Resource): # don't spider URLs more often than once an hour expiry_ms=60 * 60 * 1000, ) - self._cache.start() self._cleaner_loop = self.clock.looping_call( self._start_expire_url_cache_data, 10 * 1000, diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index d7ae22a661..b22495c1f9 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -95,10 +95,6 @@ class StateHandler(object): self.hs = hs self._state_resolution_handler = hs.get_state_resolution_handler() - def start_caching(self): - # TODO: remove this shim - self._state_resolution_handler.start_caching() - @defer.inlineCallbacks def get_current_state(self, room_id, event_type=None, state_key="", latest_event_ids=None): @@ -428,9 +424,6 @@ class StateResolutionHandler(object): self._state_cache = None self.resolve_linearizer = Linearizer(name="state_resolve_lock") - def start_caching(self): - logger.debug("start_caching") - self._state_cache = ExpiringCache( cache_name="state_cache", clock=self.clock, @@ -440,8 +433,6 @@ class StateResolutionHandler(object): reset_expiry_on_get=True, ) - self._state_cache.start() - @defer.inlineCallbacks @log_function def resolve_state_groups( diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index ce85b2ae11..921a9c5b29 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -58,7 +58,6 @@ class ExpiringCache(object): self.metrics = register_cache("expiring", cache_name, self) - def start(self): if not self._expiry_ms: # Don't bother starting the loop if things never expire return diff --git a/tests/util/test_expiring_cache.py b/tests/util/test_expiring_cache.py index 5cbada4eda..50bc7702d2 100644 --- a/tests/util/test_expiring_cache.py +++ b/tests/util/test_expiring_cache.py @@ -65,7 +65,6 @@ class ExpiringCacheTestCase(unittest.TestCase): def test_time_eviction(self): clock = MockClock() cache = ExpiringCache("test", clock, expiry_ms=1000) - cache.start() cache["key"] = 1 clock.advance_time(0.5) -- cgit 1.5.1 From 79eded1ae4f0d341918f5c58076f1e60cd400e8a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 21 Sep 2018 14:52:21 +0100 Subject: Make ExpiringCache slightly more performant --- synapse/util/caches/expiringcache.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 921a9c5b29..48ca2d634d 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -16,6 +16,8 @@ import logging from collections import OrderedDict +from six import iteritems + from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.caches import register_cache @@ -127,7 +129,7 @@ 
class ExpiringCache(object): keys_to_delete = set() - for key, cache_entry in self._cache.items(): + for key, cache_entry in iteritems(self._cache): if now - cache_entry.time > self._expiry_ms: keys_to_delete.add(key) @@ -149,6 +151,8 @@ class ExpiringCache(object): class _CacheEntry(object): + __slots__ = ["time", "value"] + def __init__(self, time, value): self.time = time self.value = value -- cgit 1.5.1 From fdd1a62e8d09ddccbe685fe7d7840990a9c06241 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 21 Sep 2018 14:55:47 +0100 Subject: Add a five minute cache to get_destination_retry_timings Hopefully helps with #3931 --- synapse/storage/transactions.py | 23 ++++++++++++++++++++++- synapse/util/caches/expiringcache.py | 13 +++++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index baf0379a68..ab54977a75 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -23,6 +23,7 @@ from canonicaljson import encode_canonical_json from twisted.internet import defer from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.util.caches.expiringcache import ExpiringCache from ._base import SQLBaseStore, db_to_json @@ -49,6 +50,8 @@ _UpdateTransactionRow = namedtuple( ) ) +SENTINEL = object() + class TransactionStore(SQLBaseStore): """A collection of queries for handling PDUs. @@ -59,6 +62,12 @@ class TransactionStore(SQLBaseStore): self._clock.looping_call(self._start_cleanup_transactions, 30 * 60 * 1000) + self._destination_retry_cache = ExpiringCache( + cache_name="get_destination_retry_timings", + clock=self._clock, + expiry_ms=5 * 60 * 1000, + ) + def get_received_txn_response(self, transaction_id, origin): """For an incoming transaction from a given origin, check if we have already responded to it. If so, return the response code and response @@ -155,6 +164,7 @@ class TransactionStore(SQLBaseStore): """ pass + @defer.inlineCallbacks def get_destination_retry_timings(self, destination): """Gets the current retry timings (if any) for a given destination. @@ -165,10 +175,20 @@ class TransactionStore(SQLBaseStore): None if not retrying Otherwise a dict for the retry scheme """ - return self.runInteraction( + + result = self._destination_retry_cache.get(destination, SENTINEL) + if result is not SENTINEL: + defer.returnValue(result) + + result = yield self.runInteraction( "get_destination_retry_timings", self._get_destination_retry_timings, destination) + # We don't hugely care about race conditions between getting and + # invalidating the cache, since we time out fairly quickly anyway. 
+ self._destination_retry_cache[destination] = result + defer.returnValue(result) + def _get_destination_retry_timings(self, txn, destination): result = self._simple_select_one_txn( txn, @@ -196,6 +216,7 @@ class TransactionStore(SQLBaseStore): retry_interval (int) - how long until next retry in ms """ + self._destination_retry_cache.pop(destination) return self.runInteraction( "set_destination_retry_timings", self._set_destination_retry_timings, diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 48ca2d634d..346669e4ce 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -24,6 +24,9 @@ from synapse.util.caches import register_cache logger = logging.getLogger(__name__) +SENTINEL = object() + + class ExpiringCache(object): def __init__(self, cache_name, clock, max_len=0, expiry_ms=0, reset_expiry_on_get=False, iterable=False): @@ -102,6 +105,16 @@ class ExpiringCache(object): return entry.value + def pop(self, key, default=None): + value = self._cache.pop(key, SENTINEL) + if value is SENTINEL: + return default + + if self.iterable: + self._size_estimate -= len(value.value) + + return value + def __contains__(self, key): return key in self._cache -- cgit 1.5.1 From 19dc676d1aaf66a45c9a1810132952f7ae3ca2aa Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 21 Sep 2018 16:25:42 +0100 Subject: Fix ExpiringCache.__len__ to be accurate It used to try and produce an estimate, which was sometimes negative. This caused metrics to be sad, so lets always just calculate it from scratch. --- synapse/util/caches/expiringcache.py | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) (limited to 'synapse') diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 346669e4ce..8ac56d85ff 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -16,7 +16,7 @@ import logging from collections import OrderedDict -from six import iteritems +from six import iteritems, itervalues from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.caches import register_cache @@ -59,8 +59,6 @@ class ExpiringCache(object): self.iterable = iterable - self._size_estimate = 0 - self.metrics = register_cache("expiring", cache_name, self) if not self._expiry_ms: @@ -79,16 +77,11 @@ class ExpiringCache(object): now = self._clock.time_msec() self._cache[key] = _CacheEntry(now, value) - if self.iterable: - self._size_estimate += len(value) - # Evict if there are now too many items while self._max_len and len(self) > self._max_len: _key, value = self._cache.popitem(last=False) if self.iterable: - removed_len = len(value.value) - self.metrics.inc_evictions(removed_len) - self._size_estimate -= removed_len + self.metrics.inc_evictions(len(value.value)) else: self.metrics.inc_evictions() @@ -111,7 +104,9 @@ class ExpiringCache(object): return default if self.iterable: - self._size_estimate -= len(value.value) + self.metrics.inc_evictions(len(value.value)) + else: + self.metrics.inc_evictions() return value @@ -149,7 +144,9 @@ class ExpiringCache(object): for k in keys_to_delete: value = self._cache.pop(k) if self.iterable: - self._size_estimate -= len(value.value) + self.metrics.inc_evictions(len(value.value)) + else: + self.metrics.inc_evictions() logger.debug( "[%s] _prune_cache before: %d, after len: %d", @@ -158,7 +155,7 @@ class ExpiringCache(object): def __len__(self): if self.iterable: - return 
self._size_estimate
+            return sum(len(entry.value) for entry in itervalues(self._cache))
         else:
             return len(self._cache)
-- cgit 1.5.1


From e302f40e20df3b2fdb1a3fd18abd4164455e8ac7 Mon Sep 17 00:00:00 2001
From: Amber Brown
Date: Mon, 24 Sep 2018 23:40:05 +1000
Subject: update version

---
 synapse/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'synapse')

diff --git a/synapse/__init__.py b/synapse/__init__.py
index f6a194dfc2..58244a5dd4 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -27,4 +27,4 @@ try:
 except ImportError:
     pass
 
-__version__ = "0.33.5rc1"
+__version__ = "0.33.5"
-- cgit 1.5.1


From 6b6cb32297b29a1dc6c415150b8a9a37b38349bf Mon Sep 17 00:00:00 2001
From: Amber Brown
Date: Tue, 25 Sep 2018 02:54:34 +1000
Subject: bump version

---
 synapse/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'synapse')

diff --git a/synapse/__init__.py b/synapse/__init__.py
index 58244a5dd4..b1f7a89fba 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -27,4 +27,4 @@ try:
 except ImportError:
     pass
 
-__version__ = "0.33.5"
+__version__ = "0.33.5.1"
-- cgit 1.5.1


From 787d22ed6cbc8f78fed7e316d345e54b7c6a93da Mon Sep 17 00:00:00 2001
From: Matthew Hodgson
Date: Tue, 25 Sep 2018 00:49:26 +0100
Subject: Only lazy load self-members on initial sync

Given we have disabled lazy loading for incr syncs in #3840, we can make
self-LL more efficient by only doing it on initial sync. Also adds a bounds
check for if/when we change our mind, so that we don't try to include LL
members on sync responses with no timeline.
---
 changelog.d/3936.bugfix  |  1 +
 synapse/handlers/sync.py | 13 ++++++++-----
 2 files changed, 9 insertions(+), 5 deletions(-)
 create mode 100644 changelog.d/3936.bugfix

(limited to 'synapse')

diff --git a/changelog.d/3936.bugfix b/changelog.d/3936.bugfix
new file mode 100644
index 0000000000..49b02b9e27
--- /dev/null
+++ b/changelog.d/3936.bugfix
@@ -0,0 +1 @@
+Fix out-of-bounds error when LLing yourself
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index b598916b21..c7d69d9d80 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -713,10 +713,6 @@ class SyncHandler(object):
             )
         ]
 
-        # always make sure we LL ourselves so we know we're in the room
-        # (if we are), to fix https://github.com/vector-im/riot-web/issues/7209
-        types.append((EventTypes.Member, sync_config.user.to_string()))
-
         # only apply the filtering to room members
         filtered_types = [EventTypes.Member]
 
@@ -726,6 +722,13 @@ class SyncHandler(object):
         }
 
         if full_state:
+            if lazy_load_members:
+                # always make sure we LL ourselves so we know we're in the room
+                # (if we are) to fix https://github.com/vector-im/riot-web/issues/7209
+                # We only need to apply this on full state syncs given we disabled
+                # LL for incr syncs in #3840.
+                types.append((EventTypes.Member, sync_config.user.to_string()))
+
             if batch:
                 current_state_ids = yield self.store.get_state_ids_for_event(
                     batch.events[-1].event_id, types=types,
@@ -794,7 +797,7 @@ class SyncHandler(object):
         else:
             state_ids = {}
             if lazy_load_members:
-                if types:
+                if types and batch.events:
                     # We're returning an incremental sync, with no
                     # "gap" since the previous sync, so normally there would be
                     # no state to return.
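
To make the sync.py hunks above concrete: `types` is a list of (event type, state key) pairs used to filter the state returned to a syncing client. A minimal sketch of the shapes involved (the user id here is hypothetical, not taken from the diff):

    # Illustrative only -- not the handler code itself.
    types = [
        ("m.room.member", "@alice:example.com"),  # the syncing user's own membership
    ]
    filtered_types = ["m.room.member"]  # only member events are subject to `types`
    # The new bounds check (`if types and batch.events`) additionally skips the
    # lazy-load shortcut when the sync response contains no timeline events.
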
-- cgit 1.5.1 From a9d84f4e44783bc75dd931093339636732bfbf6c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 25 Sep 2018 10:43:39 +0100 Subject: We require attrs 16.0.0 Ref: https://github.com/matrix-org/synapse/issues/3945 --- synapse/python_dependencies.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 0d8de600cf..c779f69fa0 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -58,7 +58,9 @@ REQUIREMENTS = { "phonenumbers>=8.2.0": ["phonenumbers"], "six": ["six"], "prometheus_client": ["prometheus_client"], - "attrs": ["attr"], + + # we use attr.s(slots), which arrived in 16.0.0 + "attrs>=16.0.0": ["attr>=16.0.0"], "netaddr>=0.7.18": ["netaddr"], } -- cgit 1.5.1 From c53336986d9208925438a8e2366bac7d1267a95b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 25 Sep 2018 11:19:27 +0100 Subject: Move synctl into top dir to avoid a symlink symlinks apparently break setuptools on python3 and alpine (https://bugs.python.org/issue31940), so let's stop using a symlink and just use the file directly. --- synapse/app/synctl.py | 284 ------------------------------------------------- synctl | 285 +++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 284 insertions(+), 285 deletions(-) delete mode 100755 synapse/app/synctl.py mode change 120000 => 100755 synctl (limited to 'synapse') diff --git a/synapse/app/synctl.py b/synapse/app/synctl.py deleted file mode 100755 index d658f967ba..0000000000 --- a/synapse/app/synctl.py +++ /dev/null @@ -1,284 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import argparse -import collections -import errno -import glob -import os -import os.path -import signal -import subprocess -import sys -import time - -from six import iteritems - -import yaml - -SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"] - -GREEN = "\x1b[1;32m" -YELLOW = "\x1b[1;33m" -RED = "\x1b[1;31m" -NORMAL = "\x1b[m" - - -def pid_running(pid): - try: - os.kill(pid, 0) - return True - except OSError as err: - if err.errno == errno.EPERM: - return True - return False - - -def write(message, colour=NORMAL, stream=sys.stdout): - if colour == NORMAL: - stream.write(message + "\n") - else: - stream.write(colour + message + NORMAL + "\n") - - -def abort(message, colour=RED, stream=sys.stderr): - write(message, colour, stream) - sys.exit(1) - - -def start(configfile): - write("Starting ...") - args = SYNAPSE - args.extend(["--daemonize", "-c", configfile]) - - try: - subprocess.check_call(args) - write("started synapse.app.homeserver(%r)" % - (configfile,), colour=GREEN) - except subprocess.CalledProcessError as e: - write( - "error starting (exit code: %d); see above for logs" % e.returncode, - colour=RED, - ) - - -def start_worker(app, configfile, worker_configfile): - args = [ - "python", "-B", - "-m", app, - "-c", configfile, - "-c", worker_configfile - ] - - try: - subprocess.check_call(args) - write("started %s(%r)" % (app, worker_configfile), colour=GREEN) - except subprocess.CalledProcessError as e: - write( - "error starting %s(%r) (exit code: %d); see above for logs" % ( - app, worker_configfile, e.returncode, - ), - colour=RED, - ) - - -def stop(pidfile, app): - if os.path.exists(pidfile): - pid = int(open(pidfile).read()) - try: - os.kill(pid, signal.SIGTERM) - write("stopped %s" % (app,), colour=GREEN) - except OSError as err: - if err.errno == errno.ESRCH: - write("%s not running" % (app,), colour=YELLOW) - elif err.errno == errno.EPERM: - abort("Cannot stop %s: Operation not permitted" % (app,)) - else: - abort("Cannot stop %s: Unknown error" % (app,)) - - -Worker = collections.namedtuple("Worker", [ - "app", "configfile", "pidfile", "cache_factor" -]) - - -def main(): - - parser = argparse.ArgumentParser() - - parser.add_argument( - "action", - choices=["start", "stop", "restart"], - help="whether to start, stop or restart the synapse", - ) - parser.add_argument( - "configfile", - nargs="?", - default="homeserver.yaml", - help="the homeserver config file, defaults to homeserver.yaml", - ) - parser.add_argument( - "-w", "--worker", - metavar="WORKERCONFIG", - help="start or stop a single worker", - ) - parser.add_argument( - "-a", "--all-processes", - metavar="WORKERCONFIGDIR", - help="start or stop all the workers in the given directory" - " and the main synapse process", - ) - - options = parser.parse_args() - - if options.worker and options.all_processes: - write( - 'Cannot use "--worker" with "--all-processes"', - stream=sys.stderr - ) - sys.exit(1) - - configfile = options.configfile - - if not os.path.exists(configfile): - write( - "No config file found\n" - "To generate a config file, run '%s -c %s --generate-config" - " --server-name='\n" % ( - " ".join(SYNAPSE), options.configfile - ), - stream=sys.stderr, - ) - sys.exit(1) - - with open(configfile) as stream: - config = yaml.load(stream) - - pidfile = config["pid_file"] - cache_factor = config.get("synctl_cache_factor") - start_stop_synapse = True - - if cache_factor: - os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor) - - cache_factors = config.get("synctl_cache_factors", {}) - for 
cache_name, factor in iteritems(cache_factors): - os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor) - - worker_configfiles = [] - if options.worker: - start_stop_synapse = False - worker_configfile = options.worker - if not os.path.exists(worker_configfile): - write( - "No worker config found at %r" % (worker_configfile,), - stream=sys.stderr, - ) - sys.exit(1) - worker_configfiles.append(worker_configfile) - - if options.all_processes: - # To start the main synapse with -a you need to add a worker file - # with worker_app == "synapse.app.homeserver" - start_stop_synapse = False - worker_configdir = options.all_processes - if not os.path.isdir(worker_configdir): - write( - "No worker config directory found at %r" % (worker_configdir,), - stream=sys.stderr, - ) - sys.exit(1) - worker_configfiles.extend(sorted(glob.glob( - os.path.join(worker_configdir, "*.yaml") - ))) - - workers = [] - for worker_configfile in worker_configfiles: - with open(worker_configfile) as stream: - worker_config = yaml.load(stream) - worker_app = worker_config["worker_app"] - if worker_app == "synapse.app.homeserver": - # We need to special case all of this to pick up options that may - # be set in the main config file or in this worker config file. - worker_pidfile = ( - worker_config.get("pid_file") - or pidfile - ) - worker_cache_factor = worker_config.get("synctl_cache_factor") or cache_factor - daemonize = worker_config.get("daemonize") or config.get("daemonize") - assert daemonize, "Main process must have daemonize set to true" - - # The master process doesn't support using worker_* config. - for key in worker_config: - if key == "worker_app": # But we allow worker_app - continue - assert not key.startswith("worker_"), \ - "Main process cannot use worker_* config" - else: - worker_pidfile = worker_config["worker_pid_file"] - worker_daemonize = worker_config["worker_daemonize"] - assert worker_daemonize, "In config %r: expected '%s' to be True" % ( - worker_configfile, "worker_daemonize") - worker_cache_factor = worker_config.get("synctl_cache_factor") - workers.append(Worker( - worker_app, worker_configfile, worker_pidfile, worker_cache_factor, - )) - - action = options.action - - if action == "stop" or action == "restart": - for worker in workers: - stop(worker.pidfile, worker.app) - - if start_stop_synapse: - stop(pidfile, "synapse.app.homeserver") - - # Wait for synapse to actually shutdown before starting it again - if action == "restart": - running_pids = [] - if start_stop_synapse and os.path.exists(pidfile): - running_pids.append(int(open(pidfile).read())) - for worker in workers: - if os.path.exists(worker.pidfile): - running_pids.append(int(open(worker.pidfile).read())) - if len(running_pids) > 0: - write("Waiting for process to exit before restarting...") - for running_pid in running_pids: - while pid_running(running_pid): - time.sleep(0.2) - write("All processes exited; now restarting...") - - if action == "start" or action == "restart": - if start_stop_synapse: - # Check if synapse is already running - if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())): - abort("synapse.app.homeserver already running") - start(configfile) - - for worker in workers: - if worker.cache_factor: - os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor) - - start_worker(worker.app, configfile, worker.configfile) - - if cache_factor: - os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor) - else: - os.environ.pop("SYNAPSE_CACHE_FACTOR", None) - - -if __name__ == "__main__": 
- main() diff --git a/synctl b/synctl deleted file mode 120000 index 1bdceda20a..0000000000 --- a/synctl +++ /dev/null @@ -1 +0,0 @@ -./synapse/app/synctl.py \ No newline at end of file diff --git a/synctl b/synctl new file mode 100755 index 0000000000..d658f967ba --- /dev/null +++ b/synctl @@ -0,0 +1,284 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2014-2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import collections +import errno +import glob +import os +import os.path +import signal +import subprocess +import sys +import time + +from six import iteritems + +import yaml + +SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"] + +GREEN = "\x1b[1;32m" +YELLOW = "\x1b[1;33m" +RED = "\x1b[1;31m" +NORMAL = "\x1b[m" + + +def pid_running(pid): + try: + os.kill(pid, 0) + return True + except OSError as err: + if err.errno == errno.EPERM: + return True + return False + + +def write(message, colour=NORMAL, stream=sys.stdout): + if colour == NORMAL: + stream.write(message + "\n") + else: + stream.write(colour + message + NORMAL + "\n") + + +def abort(message, colour=RED, stream=sys.stderr): + write(message, colour, stream) + sys.exit(1) + + +def start(configfile): + write("Starting ...") + args = SYNAPSE + args.extend(["--daemonize", "-c", configfile]) + + try: + subprocess.check_call(args) + write("started synapse.app.homeserver(%r)" % + (configfile,), colour=GREEN) + except subprocess.CalledProcessError as e: + write( + "error starting (exit code: %d); see above for logs" % e.returncode, + colour=RED, + ) + + +def start_worker(app, configfile, worker_configfile): + args = [ + "python", "-B", + "-m", app, + "-c", configfile, + "-c", worker_configfile + ] + + try: + subprocess.check_call(args) + write("started %s(%r)" % (app, worker_configfile), colour=GREEN) + except subprocess.CalledProcessError as e: + write( + "error starting %s(%r) (exit code: %d); see above for logs" % ( + app, worker_configfile, e.returncode, + ), + colour=RED, + ) + + +def stop(pidfile, app): + if os.path.exists(pidfile): + pid = int(open(pidfile).read()) + try: + os.kill(pid, signal.SIGTERM) + write("stopped %s" % (app,), colour=GREEN) + except OSError as err: + if err.errno == errno.ESRCH: + write("%s not running" % (app,), colour=YELLOW) + elif err.errno == errno.EPERM: + abort("Cannot stop %s: Operation not permitted" % (app,)) + else: + abort("Cannot stop %s: Unknown error" % (app,)) + + +Worker = collections.namedtuple("Worker", [ + "app", "configfile", "pidfile", "cache_factor" +]) + + +def main(): + + parser = argparse.ArgumentParser() + + parser.add_argument( + "action", + choices=["start", "stop", "restart"], + help="whether to start, stop or restart the synapse", + ) + parser.add_argument( + "configfile", + nargs="?", + default="homeserver.yaml", + help="the homeserver config file, defaults to homeserver.yaml", + ) + parser.add_argument( + "-w", "--worker", + metavar="WORKERCONFIG", + help="start or stop a single worker", + ) + 
parser.add_argument( + "-a", "--all-processes", + metavar="WORKERCONFIGDIR", + help="start or stop all the workers in the given directory" + " and the main synapse process", + ) + + options = parser.parse_args() + + if options.worker and options.all_processes: + write( + 'Cannot use "--worker" with "--all-processes"', + stream=sys.stderr + ) + sys.exit(1) + + configfile = options.configfile + + if not os.path.exists(configfile): + write( + "No config file found\n" + "To generate a config file, run '%s -c %s --generate-config" + " --server-name='\n" % ( + " ".join(SYNAPSE), options.configfile + ), + stream=sys.stderr, + ) + sys.exit(1) + + with open(configfile) as stream: + config = yaml.load(stream) + + pidfile = config["pid_file"] + cache_factor = config.get("synctl_cache_factor") + start_stop_synapse = True + + if cache_factor: + os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor) + + cache_factors = config.get("synctl_cache_factors", {}) + for cache_name, factor in iteritems(cache_factors): + os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor) + + worker_configfiles = [] + if options.worker: + start_stop_synapse = False + worker_configfile = options.worker + if not os.path.exists(worker_configfile): + write( + "No worker config found at %r" % (worker_configfile,), + stream=sys.stderr, + ) + sys.exit(1) + worker_configfiles.append(worker_configfile) + + if options.all_processes: + # To start the main synapse with -a you need to add a worker file + # with worker_app == "synapse.app.homeserver" + start_stop_synapse = False + worker_configdir = options.all_processes + if not os.path.isdir(worker_configdir): + write( + "No worker config directory found at %r" % (worker_configdir,), + stream=sys.stderr, + ) + sys.exit(1) + worker_configfiles.extend(sorted(glob.glob( + os.path.join(worker_configdir, "*.yaml") + ))) + + workers = [] + for worker_configfile in worker_configfiles: + with open(worker_configfile) as stream: + worker_config = yaml.load(stream) + worker_app = worker_config["worker_app"] + if worker_app == "synapse.app.homeserver": + # We need to special case all of this to pick up options that may + # be set in the main config file or in this worker config file. + worker_pidfile = ( + worker_config.get("pid_file") + or pidfile + ) + worker_cache_factor = worker_config.get("synctl_cache_factor") or cache_factor + daemonize = worker_config.get("daemonize") or config.get("daemonize") + assert daemonize, "Main process must have daemonize set to true" + + # The master process doesn't support using worker_* config. 
+ for key in worker_config: + if key == "worker_app": # But we allow worker_app + continue + assert not key.startswith("worker_"), \ + "Main process cannot use worker_* config" + else: + worker_pidfile = worker_config["worker_pid_file"] + worker_daemonize = worker_config["worker_daemonize"] + assert worker_daemonize, "In config %r: expected '%s' to be True" % ( + worker_configfile, "worker_daemonize") + worker_cache_factor = worker_config.get("synctl_cache_factor") + workers.append(Worker( + worker_app, worker_configfile, worker_pidfile, worker_cache_factor, + )) + + action = options.action + + if action == "stop" or action == "restart": + for worker in workers: + stop(worker.pidfile, worker.app) + + if start_stop_synapse: + stop(pidfile, "synapse.app.homeserver") + + # Wait for synapse to actually shutdown before starting it again + if action == "restart": + running_pids = [] + if start_stop_synapse and os.path.exists(pidfile): + running_pids.append(int(open(pidfile).read())) + for worker in workers: + if os.path.exists(worker.pidfile): + running_pids.append(int(open(worker.pidfile).read())) + if len(running_pids) > 0: + write("Waiting for process to exit before restarting...") + for running_pid in running_pids: + while pid_running(running_pid): + time.sleep(0.2) + write("All processes exited; now restarting...") + + if action == "start" or action == "restart": + if start_stop_synapse: + # Check if synapse is already running + if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())): + abort("synapse.app.homeserver already running") + start(configfile) + + for worker in workers: + if worker.cache_factor: + os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor) + + start_worker(worker.app, configfile, worker.configfile) + + if cache_factor: + os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor) + else: + os.environ.pop("SYNAPSE_CACHE_FACTOR", None) + + +if __name__ == "__main__": + main() -- cgit 1.5.1 From 6cf261930ac4c5f08cd3d6bf1f5a37a2889f6e7b Mon Sep 17 00:00:00 2001 From: Jérémy Farnaud Date: Tue, 25 Sep 2018 12:55:02 +0200 Subject: added "media-src: 'self'" to CSP for resources (#3578) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Synapse doesn’t allow for media resources to be played directly from Chrome. It is a problem for users on other networks (e.g. IRC) communicating with Matrix users through a gateway. The gateway sends them the raw URL for the resource when a Matrix user uploads a video and the video cannot be played directly in Chrome using that URL. Chrome argues it is not authorized to play the video because of the Content Security Policy. Chrome checks for the "media-src" policy which is missing, and defauts to the "default-src" policy which is "none". As Synapse already sends "object-src: 'self'" I thought it wouldn’t be a problem to add "media-src: 'self'" to the CSP to fix this problem. --- changelog.d/3578.bugfix | 1 + synapse/rest/media/v1/download_resource.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/3578.bugfix (limited to 'synapse') diff --git a/changelog.d/3578.bugfix b/changelog.d/3578.bugfix new file mode 100644 index 0000000000..9c52b6fa7e --- /dev/null +++ b/changelog.d/3578.bugfix @@ -0,0 +1 @@ +Fix problem when playing media from Chrome using direct URL (thanks @remjey!) 
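
The hunk that follows adds a `media-src` directive alongside the existing ones. Roughly, the download resource ends up sending a header like the sketch below; this shows only the directives visible in the hunk, not necessarily the complete policy:

    # A sketch of setting such a policy on a twisted.web request.
    def set_download_csp(request):
        request.setHeader(
            b"Content-Security-Policy",
            b" script-src 'none';"
            b" plugin-types application/pdf;"
            b" style-src 'unsafe-inline';"
            b" media-src 'self';"  # the new directive: lets Chrome play media directly
            b" object-src 'self';"
        )
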
diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py index ca90964d1d..f911b120b1 100644 --- a/synapse/rest/media/v1/download_resource.py +++ b/synapse/rest/media/v1/download_resource.py @@ -52,6 +52,7 @@ class DownloadResource(Resource): b" script-src 'none';" b" plugin-types application/pdf;" b" style-src 'unsafe-inline';" + b" media-src 'self';" b" object-src 'self';" ) server_name, media_id, name = parse_media_id(request) -- cgit 1.5.1 From 3baf6e1667f0c257f288a28bd542a93c2445fd30 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 21 Sep 2018 16:25:42 +0100 Subject: Fix ExpiringCache.__len__ to be accurate It used to try and produce an estimate, which was sometimes negative. This caused metrics to be sad, so lets always just calculate it from scratch. (This appears to have been a longstanding bug, but one which has been made more of a problem by #3932 and #3933). (This was originally done by Erik as part of #3933. I'm cherry-picking it because really it's a fix in its own right) --- synapse/util/caches/expiringcache.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) (limited to 'synapse') diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 921a9c5b29..9af4ec4aa8 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -16,6 +16,8 @@ import logging from collections import OrderedDict +from six import itervalues + from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.caches import register_cache @@ -54,8 +56,6 @@ class ExpiringCache(object): self.iterable = iterable - self._size_estimate = 0 - self.metrics = register_cache("expiring", cache_name, self) if not self._expiry_ms: @@ -74,16 +74,11 @@ class ExpiringCache(object): now = self._clock.time_msec() self._cache[key] = _CacheEntry(now, value) - if self.iterable: - self._size_estimate += len(value) - # Evict if there are now too many items while self._max_len and len(self) > self._max_len: _key, value = self._cache.popitem(last=False) if self.iterable: - removed_len = len(value.value) - self.metrics.inc_evictions(removed_len) - self._size_estimate -= removed_len + self.metrics.inc_evictions(len(value.value)) else: self.metrics.inc_evictions() @@ -134,7 +129,9 @@ class ExpiringCache(object): for k in keys_to_delete: value = self._cache.pop(k) if self.iterable: - self._size_estimate -= len(value.value) + self.metrics.inc_evictions(len(value.value)) + else: + self.metrics.inc_evictions() logger.debug( "[%s] _prune_cache before: %d, after len: %d", @@ -143,7 +140,7 @@ class ExpiringCache(object): def __len__(self): if self.iterable: - return self._size_estimate + return sum(len(entry.value) for entry in itervalues(self._cache)) else: return len(self._cache) -- cgit 1.5.1 From 7ee94fc1baf6c4e2970232383d76277d8378712b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 12:40:20 +0100 Subject: Log which cache is throwing exceptions --- synapse/util/caches/__init__.py | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) (limited to 'synapse') diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 7b065b195e..f37d5bec08 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import logging import os import six @@ -20,6 +21,8 @@ from six.moves import intern from prometheus_client.core import REGISTRY, Gauge, GaugeMetricFamily +logger = logging.getLogger(__name__) + CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.5)) @@ -76,16 +79,20 @@ def register_cache(cache_type, cache_name, cache): return [] def collect(self): - if cache_type == "response_cache": - response_cache_size.labels(cache_name).set(len(cache)) - response_cache_hits.labels(cache_name).set(self.hits) - response_cache_evicted.labels(cache_name).set(self.evicted_size) - response_cache_total.labels(cache_name).set(self.hits + self.misses) - else: - cache_size.labels(cache_name).set(len(cache)) - cache_hits.labels(cache_name).set(self.hits) - cache_evicted.labels(cache_name).set(self.evicted_size) - cache_total.labels(cache_name).set(self.hits + self.misses) + try: + if cache_type == "response_cache": + response_cache_size.labels(cache_name).set(len(cache)) + response_cache_hits.labels(cache_name).set(self.hits) + response_cache_evicted.labels(cache_name).set(self.evicted_size) + response_cache_total.labels(cache_name).set(self.hits + self.misses) + else: + cache_size.labels(cache_name).set(len(cache)) + cache_hits.labels(cache_name).set(self.hits) + cache_evicted.labels(cache_name).set(self.evicted_size) + cache_total.labels(cache_name).set(self.hits + self.misses) + except Exception as e: + logger.warn("Error calculating metrics for %s: %s", cache_name, e) + raise yield GaugeMetricFamily("__unused", "") -- cgit 1.5.1 From 8afddf7afec8c6ee790a10e24fe817d663bfb4ee Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 25 Sep 2018 16:34:40 +0100 Subject: Fix error handling for missing auth_event When we were authorizing an event, if there was no `m.room.create` in its auth_events, we would raise a SynapseError with a cryptic message, which then meant that we would bail out of processing any incoming events, rather than storing a rejection for the faulty event and moving on. We should treat the absent event the same as any other auth failure, by raising an AuthError, so that the event is marked as rejected. --- synapse/event_auth.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 6baeccca38..02fa46ef7f 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -98,9 +98,9 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True): creation_event = auth_events.get((EventTypes.Create, ""), None) if not creation_event: - raise SynapseError( + raise AuthError( 403, - "Room %r does not exist" % (event.room_id,) + "No create event in auth events", ) creating_domain = get_domain_from_id(event.room_id) -- cgit 1.5.1 From 28781b65e7cb8b0d1fc409d0d78253662b49fe06 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 26 Sep 2018 16:16:41 +0100 Subject: fix #3854 --- synapse/storage/monthly_active_users.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index 59580949f1..0fe8c8e24c 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -172,6 +172,10 @@ class MonthlyActiveUsersStore(SQLBaseStore): Deferred[bool]: True if a new entry was created, False if an existing one was updated. 
""" + # Am consciously deciding to lock the table on the basis that is ought + # never be a big table and alternative approaches (batching multiple + # upserts into a single txn) introduced a lot of extra complexity. + # See https://github.com/matrix-org/synapse/issues/3854 for more is_insert = yield self._simple_upsert( desc="upsert_monthly_active_user", table="monthly_active_users", @@ -181,7 +185,6 @@ class MonthlyActiveUsersStore(SQLBaseStore): values={ "timestamp": int(self._clock.time_msec()), }, - lock=False, ) if is_insert: self.user_last_seen_monthly_active.invalidate((user_id,)) -- cgit 1.5.1 From 607eec0456dde3b7e5b33c3e9297af234659caae Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 25 Sep 2018 16:53:55 +0100 Subject: fix docstring for FederationClient.get_state_for_room trivial fixes for docstring --- synapse/federation/federation_client.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'synapse') diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 5a92428f56..8bf1ad0c1f 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -289,8 +289,7 @@ class FederationClient(FederationBase): @defer.inlineCallbacks @log_function def get_state_for_room(self, destination, room_id, event_id): - """Requests all of the `current` state PDUs for a given room from - a remote home server. + """Requests all of the room state at a given event from a remote home server. Args: destination (str): The remote homeserver to query for the state. @@ -298,9 +297,10 @@ class FederationClient(FederationBase): event_id (str): The id of the event we want the state at. Returns: - Deferred: Results in a list of PDUs. + Deferred[Tuple[List[EventBase], List[EventBase]]]: + A list of events in the state, and a list of events in the auth chain + for the given event. """ - try: # First we try and ask for just the IDs, as thats far quicker if # we have most of the state and auth_chain already. -- cgit 1.5.1 From 9453c65948bce69f259c1ba1005565d86b1d8730 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 25 Sep 2018 17:38:54 +0100 Subject: remove spurious federation checks on localhost There's really no point in checking for destinations called "localhost" because there is nothing stopping people creating other DNS entries which point to 127.0.0.1. The right fix for this is https://github.com/matrix-org/synapse/issues/3953. Blocking localhost, on the other hand, means that you get a surprise when trying to connect a test server on localhost to an existing server (with a 'normal' server_name). --- synapse/federation/transaction_queue.py | 37 ++++++--------------------------- 1 file changed, 6 insertions(+), 31 deletions(-) (limited to 'synapse') diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 8cbf8c4f7f..ae47aaae0b 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -137,26 +137,6 @@ class TransactionQueue(object): self._processing_pending_presence = False - def can_send_to(self, destination): - """Can we send messages to the given server? - - We can't send messages to ourselves. If we are running on localhost - then we can only federation with other servers running on localhost. - Otherwise we only federate with servers on a public domain. - - Args: - destination(str): The server we are possibly trying to send to. - Returns: - bool: True if we can send to the server. 
-        """
-
-        if destination == self.server_name:
-            return False
-        if self.server_name.startswith("localhost"):
-            return destination.startswith("localhost")
-        else:
-            return not destination.startswith("localhost")
-
     def notify_new_events(self, current_id):
         """This gets called when we have some new events we might want
         to send out to other servers.
@@ -279,10 +259,7 @@ class TransactionQueue(object):
         self._order += 1
 
         destinations = set(destinations)
-        destinations = set(
-            dest for dest in destinations if self.can_send_to(dest)
-        )
-
+        destinations.discard(self.server_name)
         logger.debug("Sending to: %s", str(destinations))
 
         if not destinations:
@@ -358,7 +335,7 @@ class TransactionQueue(object):
 
         for destinations, states in hosts_and_states:
             for destination in destinations:
-                if not self.can_send_to(destination):
+                if destination == self.server_name:
                     continue
 
                 self.pending_presence_by_dest.setdefault(
@@ -377,7 +354,8 @@ class TransactionQueue(object):
             content=content,
         )
 
-        if not self.can_send_to(destination):
+        if destination == self.server_name:
+            logger.info("Not sending EDU to ourselves")
             return
 
         sent_edus_counter.inc()
@@ -392,11 +370,9 @@ class TransactionQueue(object):
         self._attempt_new_transaction(destination)
 
     def send_device_messages(self, destination):
-        if destination == self.server_name or destination == "localhost":
-            return
-
-        if not self.can_send_to(destination):
-            return
+        if destination == self.server_name:
+            logger.info("Not sending device update to ourselves")
+            return
 
         self._attempt_new_transaction(destination)
-- cgit 1.5.1


From a87d419a85e1c966da5bb4fc08f3309eb78ecb7a Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Wed, 26 Sep 2018 07:48:35 +0100
Subject: Run notify_app_services as a bg process

This ensures that its resource usage metrics get recorded somewhere rather
than getting lost.

(It also fixes an error when called from a nested logging context which
completes before the bg process)
---
 synapse/notifier.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

(limited to 'synapse')

diff --git a/synapse/notifier.py b/synapse/notifier.py
index f1d92c1395..340b16ce25 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -24,9 +24,10 @@ from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import AuthError
 from synapse.handlers.presence import format_user_presence_state
 from synapse.metrics import LaterGauge
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import StreamToken
 from synapse.util.async_helpers import ObservableDeferred, timeout_deferred
-from synapse.util.logcontext import PreserveLoggingContext, run_in_background
+from synapse.util.logcontext import PreserveLoggingContext
 from synapse.util.logutils import log_function
 from synapse.util.metrics import Measure
 from synapse.visibility import filter_events_for_client
@@ -248,7 +249,10 @@ class Notifier(object):
     def _on_new_room_event(self, event, room_stream_id, extra_users=[]):
         """Notify any user streams that are interested in this room event"""
         # poke any interested application service.
- run_in_background(self._notify_app_services, room_stream_id) + run_as_background_process( + "notify_app_services", + self._notify_app_services, room_stream_id, + ) if self.federation_sender: self.federation_sender.notify_new_events(room_stream_id) -- cgit 1.5.1 From e70b4ce06920102e2460dfb65bc357e2d7e8b794 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 07:56:06 +0100 Subject: Logging improvements Some logging tweaks to help with debugging incoming federation transactions --- changelog.d/3966.misc | 1 + synapse/handlers/federation.py | 4 ++++ synapse/state/v1.py | 14 ++++++++++++-- 3 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 changelog.d/3966.misc (limited to 'synapse') diff --git a/changelog.d/3966.misc b/changelog.d/3966.misc new file mode 100644 index 0000000000..1e3c8e1706 --- /dev/null +++ b/changelog.d/3966.misc @@ -0,0 +1 @@ +Improve the logging when handling a federation transaction \ No newline at end of file diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 2ccdc3bfa7..a70ae8c830 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -572,6 +572,10 @@ class FederationHandler(BaseHandler): }) seen_ids.add(e.event_id) + logger.info( + "[%s %s] persisting newly-received auth/state events %s", + room_id, event_id, [e["event"].event_id for e in event_infos] + ) yield self._handle_new_events(origin, event_infos) try: diff --git a/synapse/state/v1.py b/synapse/state/v1.py index c95477d318..7a7157b352 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -65,10 +65,15 @@ def resolve_events_with_factory(state_sets, event_map, state_map_factory): for event_ids in itervalues(conflicted_state) for event_id in event_ids ) + needed_event_count = len(needed_events) if event_map is not None: needed_events -= set(iterkeys(event_map)) - logger.info("Asking for %d conflicted events", len(needed_events)) + logger.info( + "Asking for %d/%d conflicted events", + len(needed_events), + needed_event_count, + ) # dict[str, FrozenEvent]: a map from state event id to event. Only includes # the state events which are in conflict (and those in event_map) @@ -85,11 +90,16 @@ def resolve_events_with_factory(state_sets, event_map, state_map_factory): ) new_needed_events = set(itervalues(auth_events)) + new_needed_event_count = len(new_needed_events) new_needed_events -= needed_events if event_map is not None: new_needed_events -= set(iterkeys(event_map)) - logger.info("Asking for %d auth events", len(new_needed_events)) + logger.info( + "Asking for %d/%d auth events", + len(new_needed_events), + new_needed_event_count, + ) state_map_new = yield state_map_factory(new_needed_events) state_map.update(state_map_new) -- cgit 1.5.1 From 219606a6eda4aac01bdb24f69ec0d418689e653e Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 26 Sep 2018 13:26:27 -0600 Subject: Fix exception documentation in matrixfederationclient.py --- synapse/http/matrixfederationclient.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'synapse') diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 14b12cd1c4..1a79f6305b 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -230,7 +230,7 @@ class MatrixFederationHttpClient(object): Returns: Deferred: resolves with the http response object on success. 
- Fails with ``HTTPRequestException``: if we get an HTTP response + Fails with ``HttpResponseException``: if we get an HTTP response code >= 300. Fails with ``NotRetryingDestination`` if we are not yet ready @@ -478,7 +478,7 @@ class MatrixFederationHttpClient(object): Deferred: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. - Fails with ``HTTPRequestException`` if we get an HTTP response + Fails with ``HttpResponseException`` if we get an HTTP response code >= 300. Fails with ``NotRetryingDestination`` if we are not yet ready @@ -532,7 +532,7 @@ class MatrixFederationHttpClient(object): Deferred: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. - Fails with ``HTTPRequestException`` if we get an HTTP response + Fails with ``HttpResponseException`` if we get an HTTP response code >= 300. Fails with ``NotRetryingDestination`` if we are not yet ready @@ -587,7 +587,7 @@ class MatrixFederationHttpClient(object): Deferred: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. - Fails with ``HTTPRequestException`` if we get an HTTP response + Fails with ``HttpResponseException`` if we get an HTTP response code >= 300. Fails with ``NotRetryingDestination`` if we are not yet ready @@ -638,7 +638,7 @@ class MatrixFederationHttpClient(object): Deferred: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. - Fails with ``HTTPRequestException`` if we get an HTTP response + Fails with ``HttpResponseException`` if we get an HTTP response code >= 300. Fails with ``NotRetryingDestination`` if we are not yet ready @@ -682,7 +682,7 @@ class MatrixFederationHttpClient(object): Deferred: resolves with an (int,dict) tuple of the file length and a dict of the response headers. 
- Fails with ``HTTPRequestException`` if we get an HTTP response code + Fails with ``HttpResponseException`` if we get an HTTP response code >= 300 Fails with ``NotRetryingDestination`` if we are not yet ready -- cgit 1.5.1 From b4c3bc17343833205d3bb00009074734edce667a Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 26 Sep 2018 13:26:45 -0600 Subject: Handle HttpResponseException more safely for federated groups --- synapse/handlers/groups_local.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 53e5e2648b..e36f22f128 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -20,7 +20,7 @@ from six import iteritems from twisted.internet import defer -from synapse.api.errors import SynapseError +from synapse.api.errors import (SynapseError, HttpResponseException) from synapse.types import get_domain_from_id logger = logging.getLogger(__name__) @@ -37,9 +37,18 @@ def _create_rerouter(func_name): ) else: destination = get_domain_from_id(group_id) - return getattr(self.transport_client, func_name)( + logger.info("Triggering call") + d = getattr(self.transport_client, func_name)( destination, group_id, *args, **kwargs ) + def h(failure): + failure.trap(HttpResponseException) + e = failure.value + if e.code >= 400 and e.code < 500: + raise SynapseError(e.code, e.msg) + failure.raiseException() + d.addErrback(h) + return d return f -- cgit 1.5.1 From 82fa31799cdf5146495fa95634928a2a3f5070be Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 26 Sep 2018 14:01:02 -0600 Subject: Remove debugging statement --- synapse/handlers/groups_local.py | 1 - 1 file changed, 1 deletion(-) (limited to 'synapse') diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index e36f22f128..68de8a8e53 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -37,7 +37,6 @@ def _create_rerouter(func_name): ) else: destination = get_domain_from_id(group_id) - logger.info("Triggering call") d = getattr(self.transport_client, func_name)( destination, group_id, *args, **kwargs ) -- cgit 1.5.1 From ae6ad4cf4190dbd2dadd72fc8131e4c139f8eb4a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 27 Sep 2018 11:22:25 +0100 Subject: docstrings and unittests for storage.state (#3958) I spent ages trying to figure out how I was going mad... 
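
A sketch of the two return shapes these docstrings pin down, since confusing them is easy (the state group id and event ids below are made up):

    # get_state_groups_ids(...) resolves to
    #     {state_group_id: {(event_type, state_key): event_id, ...}, ...}
    # whereas get_state_groups(...) resolves to
    #     {state_group_id: [EventBase, ...], ...}
    def summarise_state_group_ids(state_group_map):
        for state_group_id, state_map in state_group_map.items():
            for (etype, state_key), event_id in state_map.items():
                print(state_group_id, etype, state_key, event_id)
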
--- changelog.d/3958.misc | 1 + synapse/storage/state.py | 30 ++++++++++++++++++++++-------- tests/storage/test_state.py | 39 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 62 insertions(+), 8 deletions(-) create mode 100644 changelog.d/3958.misc (limited to 'synapse') diff --git a/changelog.d/3958.misc b/changelog.d/3958.misc new file mode 100644 index 0000000000..5931d06dcf --- /dev/null +++ b/changelog.d/3958.misc @@ -0,0 +1 @@ +Fix docstrings and add tests for state store methods diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 4b971efdba..3f4cbd61c4 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -255,7 +255,17 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): ) @defer.inlineCallbacks - def get_state_groups_ids(self, room_id, event_ids): + def get_state_groups_ids(self, _room_id, event_ids): + """Get the event IDs of all the state for the state groups for the given events + + Args: + _room_id (str): id of the room for these events + event_ids (iterable[str]): ids of the events + + Returns: + Deferred[dict[int, dict[tuple[str, str], str]]]: + dict of state_group_id -> (dict of (type, state_key) -> event id) + """ if not event_ids: defer.returnValue({}) @@ -270,7 +280,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): @defer.inlineCallbacks def get_state_ids_for_group(self, state_group): - """Get the state IDs for the given state group + """Get the event IDs of all the state in the given state group Args: state_group (int) @@ -286,7 +296,9 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): def get_state_groups(self, room_id, event_ids): """ Get the state groups for the given list of event_ids - The return value is a dict mapping group names to lists of events. + Returns: + Deferred[dict[int, list[EventBase]]]: + dict of state_group_id -> list of state events. """ if not event_ids: defer.returnValue({}) @@ -324,7 +336,9 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): member events (if True), or to exclude member events (if False) Returns: - dictionary state_group -> (dict of (type, state_key) -> event id) + Returns: + Deferred[dict[int, dict[tuple[str, str], str]]]: + dict of state_group_id -> (dict of (type, state_key) -> event id) """ results = {} @@ -732,8 +746,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): If None, `types` filtering is applied to all events. Returns: - Deferred[dict[int, dict[(type, state_key), EventBase]]] - a dictionary mapping from state group to state dictionary. + Deferred[dict[int, dict[tuple[str, str], str]]]: + dict of state_group_id -> (dict of (type, state_key) -> event id) """ if types is not None: non_member_types = [t for t in types if t[0] != EventTypes.Member] @@ -788,8 +802,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): If None, `types` filtering is applied to all events. Returns: - Deferred[dict[int, dict[(type, state_key), EventBase]]] - a dictionary mapping from state group to state dictionary. 
+ Deferred[dict[int, dict[tuple[str, str], str]]]: + dict of state_group_id -> (dict of (type, state_key) -> event id) """ if types: types = frozenset(types) diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index b910965932..b9c5b39d59 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -74,6 +74,45 @@ class StateStoreTestCase(tests.unittest.TestCase): self.assertEqual(s1[t].event_id, s2[t].event_id) self.assertEqual(len(s1), len(s2)) + @defer.inlineCallbacks + def test_get_state_groups_ids(self): + e1 = yield self.inject_state_event( + self.room, self.u_alice, EventTypes.Create, '', {} + ) + e2 = yield self.inject_state_event( + self.room, self.u_alice, EventTypes.Name, '', {"name": "test room"} + ) + + state_group_map = yield self.store.get_state_groups_ids(self.room, [e2.event_id]) + self.assertEqual(len(state_group_map), 1) + state_map = list(state_group_map.values())[0] + self.assertDictEqual( + state_map, + { + (EventTypes.Create, ''): e1.event_id, + (EventTypes.Name, ''): e2.event_id, + }, + ) + + @defer.inlineCallbacks + def test_get_state_groups(self): + e1 = yield self.inject_state_event( + self.room, self.u_alice, EventTypes.Create, '', {} + ) + e2 = yield self.inject_state_event( + self.room, self.u_alice, EventTypes.Name, '', {"name": "test room"} + ) + + state_group_map = yield self.store.get_state_groups( + self.room, [e2.event_id]) + self.assertEqual(len(state_group_map), 1) + state_list = list(state_group_map.values())[0] + + self.assertEqual( + {ev.event_id for ev in state_list}, + {e1.event_id, e2.event_id}, + ) + @defer.inlineCallbacks def test_get_state_for_event(self): -- cgit 1.5.1 From 4a15a3e4d539dcea9a4a57e7cd800a926f2a17c3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 27 Sep 2018 11:25:34 +0100 Subject: Include eventid in log lines when processing incoming federation transactions (#3959) when processing incoming transactions, it can be hard to see what's going on, because we process a bunch of stuff in parallel, and because we may end up recursively working our way through a chain of three or four events. This commit creates a way to use logcontexts to add the relevant event ids to the log lines. 
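
The key building block is the `nested_logging_context` helper added below; a minimal sketch of the pattern (`process_pdu` is a hypothetical stand-in for the real PDU handler):

    from synapse.util.logcontext import nested_logging_context

    def handle_pdus_in_transaction(pdus):
        for pdu in pdus:
            # Log lines inside the block carry "<parent request>-<event id>",
            # so interleaved output from parallel processing can be told apart.
            with nested_logging_context(pdu.event_id):
                process_pdu(pdu)
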
--- changelog.d/3959.feature | 1 + synapse/federation/federation_server.py | 32 ++++++++-------- synapse/handlers/federation.py | 65 ++++++++++++++++++++------------- synapse/util/logcontext.py | 41 +++++++++++++++++++-- tests/test_federation.py | 28 ++++++++------ tests/util/test_logcontext.py | 5 +++ 6 files changed, 115 insertions(+), 57 deletions(-) create mode 100644 changelog.d/3959.feature (limited to 'synapse') diff --git a/changelog.d/3959.feature b/changelog.d/3959.feature new file mode 100644 index 0000000000..b3a4f37a8d --- /dev/null +++ b/changelog.d/3959.feature @@ -0,0 +1 @@ +Include eventid in log lines when processing incoming federation transactions \ No newline at end of file diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 9a571e4fc7..819e8f7331 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -46,6 +46,7 @@ from synapse.replication.http.federation import ( from synapse.types import get_domain_from_id from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.caches.response_cache import ResponseCache +from synapse.util.logcontext import nested_logging_context from synapse.util.logutils import log_function # when processing incoming transactions, we try to handle multiple rooms in @@ -187,21 +188,22 @@ class FederationServer(FederationBase): for pdu in pdus_by_room[room_id]: event_id = pdu.event_id - try: - yield self._handle_received_pdu( - origin, pdu - ) - pdu_results[event_id] = {} - except FederationError as e: - logger.warn("Error handling PDU %s: %s", event_id, e) - pdu_results[event_id] = {"error": str(e)} - except Exception as e: - f = failure.Failure() - pdu_results[event_id] = {"error": str(e)} - logger.error( - "Failed to handle PDU %s: %s", - event_id, f.getTraceback().rstrip(), - ) + with nested_logging_context(event_id): + try: + yield self._handle_received_pdu( + origin, pdu + ) + pdu_results[event_id] = {} + except FederationError as e: + logger.warn("Error handling PDU %s: %s", event_id, e) + pdu_results[event_id] = {"error": str(e)} + except Exception as e: + f = failure.Failure() + pdu_results[event_id] = {"error": str(e)} + logger.error( + "Failed to handle PDU %s: %s", + event_id, f.getTraceback().rstrip(), + ) yield concurrently_execute( process_pdus_for_room, pdus_by_room.keys(), diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 2ccdc3bfa7..993546387c 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -339,14 +339,18 @@ class FederationHandler(BaseHandler): "[%s %s] Requesting state at missing prev_event %s", room_id, event_id, p, ) - state, got_auth_chain = ( - yield self.federation_client.get_state_for_room( - origin, room_id, p, + + with logcontext.nested_logging_context(p): + state, got_auth_chain = ( + yield self.federation_client.get_state_for_room( + origin, room_id, p, + ) ) - ) - auth_chains.update(got_auth_chain) - state_group = {(x.type, x.state_key): x.event_id for x in state} - state_groups.append(state_group) + auth_chains.update(got_auth_chain) + state_group = { + (x.type, x.state_key): x.event_id for x in state + } + state_groups.append(state_group) # Resolve any conflicting state def fetch(ev_ids): @@ -483,20 +487,21 @@ class FederationHandler(BaseHandler): "[%s %s] Handling received prev_event %s", room_id, event_id, ev.event_id, ) - try: - yield self.on_receive_pdu( - origin, - ev, - sent_to_us_directly=False, - ) - except 
FederationError as e: - if e.code == 403: - logger.warn( - "[%s %s] Received prev_event %s failed history check.", - room_id, event_id, ev.event_id, + with logcontext.nested_logging_context(ev.event_id): + try: + yield self.on_receive_pdu( + origin, + ev, + sent_to_us_directly=False, ) - else: - raise + except FederationError as e: + if e.code == 403: + logger.warn( + "[%s %s] Received prev_event %s failed history check.", + room_id, event_id, ev.event_id, + ) + else: + raise @defer.inlineCallbacks def _process_received_pdu(self, origin, event, state, auth_chain): @@ -1135,7 +1140,8 @@ class FederationHandler(BaseHandler): try: logger.info("Processing queued PDU %s which was received " "while we were joining %s", p.event_id, p.room_id) - yield self.on_receive_pdu(origin, p, sent_to_us_directly=True) + with logcontext.nested_logging_context(p.event_id): + yield self.on_receive_pdu(origin, p, sent_to_us_directly=True) except Exception as e: logger.warn( "Error handling queued PDU %s from %s: %s", @@ -1581,15 +1587,22 @@ class FederationHandler(BaseHandler): Notifies about the events where appropriate. """ - contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults( - [ - logcontext.run_in_background( - self._prep_event, + + @defer.inlineCallbacks + def prep(ev_info): + event = ev_info["event"] + with logcontext.nested_logging_context(suffix=event.event_id): + res = yield self._prep_event( origin, - ev_info["event"], + event, state=ev_info.get("state"), auth_events=ev_info.get("auth_events"), ) + defer.returnValue(res) + + contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults( + [ + logcontext.run_in_background(prep, ev_info) for ev_info in event_infos ], consumeErrors=True, )) diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index a0c2d37610..89224b26cc 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -200,7 +200,7 @@ class LoggingContext(object): sentinel = Sentinel() - def __init__(self, name=None, parent_context=None): + def __init__(self, name=None, parent_context=None, request=None): self.previous_context = LoggingContext.current_context() self.name = name @@ -218,6 +218,13 @@ class LoggingContext(object): self.parent_context = parent_context + if self.parent_context is not None: + self.parent_context.copy_to(self) + + if request is not None: + # the request param overrides the request from the parent context + self.request = request + def __str__(self): return "%s@%x" % (self.name, id(self)) @@ -256,9 +263,6 @@ class LoggingContext(object): ) self.alive = True - if self.parent_context is not None: - self.parent_context.copy_to(self) - return self def __exit__(self, type, value, traceback): @@ -439,6 +443,35 @@ class PreserveLoggingContext(object): ) +def nested_logging_context(suffix, parent_context=None): + """Creates a new logging context as a child of another. + + The nested logging context will have a 'request' made up of the parent context's + request, plus the given suffix. + + CPU/db usage stats will be added to the parent context's on exit. + + Normal usage looks like: + + with nested_logging_context(suffix): + # ... do stuff + + Args: + suffix (str): suffix to add to the parent context's 'request'. + parent_context (LoggingContext|None): parent context. Will use the current context + if None. + + Returns: + LoggingContext: new logging context. 
+ """ + if parent_context is None: + parent_context = LoggingContext.current_context() + return LoggingContext( + parent_context=parent_context, + request=parent_context.request + "-" + suffix, + ) + + def preserve_fn(f): """Function decorator which wraps the function with run_in_background""" def g(*args, **kwargs): diff --git a/tests/test_federation.py b/tests/test_federation.py index 2540604fcc..ff55c7a627 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -6,6 +6,7 @@ from twisted.internet.defer import maybeDeferred, succeed from synapse.events import FrozenEvent from synapse.types import Requester, UserID from synapse.util import Clock +from synapse.util.logcontext import LoggingContext from tests import unittest from tests.server import ThreadedMemoryReactorClock, setup_test_homeserver @@ -117,9 +118,10 @@ class MessageAcceptTests(unittest.TestCase): } ) - d = self.handler.on_receive_pdu( - "test.serv", lying_event, sent_to_us_directly=True - ) + with LoggingContext(request="lying_event"): + d = self.handler.on_receive_pdu( + "test.serv", lying_event, sent_to_us_directly=True + ) # Step the reactor, so the database fetches come back self.reactor.advance(1) @@ -209,11 +211,12 @@ class MessageAcceptTests(unittest.TestCase): } ) - d = self.handler.on_receive_pdu( - "test.serv", good_event, sent_to_us_directly=True - ) - self.reactor.advance(1) - self.assertEqual(self.successResultOf(d), None) + with LoggingContext(request="good_event"): + d = self.handler.on_receive_pdu( + "test.serv", good_event, sent_to_us_directly=True + ) + self.reactor.advance(1) + self.assertEqual(self.successResultOf(d), None) bad_event = FrozenEvent( { @@ -230,10 +233,11 @@ class MessageAcceptTests(unittest.TestCase): } ) - d = self.handler.on_receive_pdu( - "test.serv", bad_event, sent_to_us_directly=True - ) - self.reactor.advance(1) + with LoggingContext(request="bad_event"): + d = self.handler.on_receive_pdu( + "test.serv", bad_event, sent_to_us_directly=True + ) + self.reactor.advance(1) extrem = maybeDeferred( self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py index 4633db77b3..8adaee3c8d 100644 --- a/tests/util/test_logcontext.py +++ b/tests/util/test_logcontext.py @@ -159,6 +159,11 @@ class LoggingContextTestCase(unittest.TestCase): self.assertEqual(r, "bum") self._check_test_key("one") + def test_nested_logging_context(self): + with LoggingContext(request="foo"): + nested_context = logcontext.nested_logging_context(suffix="bar") + self.assertEqual(nested_context.request, "foo-bar") + # a function which returns a deferred which has been "called", but # which had a function which returned another incomplete deferred on -- cgit 1.5.1 From e3c159863d72ba1628394497bba45dd96b9cc1ac Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 08:09:07 +0100 Subject: Clarifications in FederationHandler * add some comments on things that look a bit bogus * rename this `state` variable to avoid confusion with the `state` used elsewhere in this function. (There was no actual conflict, but it was a confusing bit of spaghetti.) 
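
One of the comments added below questions whether `auth_chains.update(got_auth_chain)` actually deduplicates events fetched via different requests. The concern, assuming the event class does not define value-based `__eq__`/`__hash__`, can be illustrated with a hypothetical stand-in:

    class StubEvent(object):  # hypothetical stand-in for a received event
        def __init__(self, event_id):
            self.event_id = event_id

    # Instances compare by identity, so a set keeps both copies of "the
    # same" event:
    auth_chain = {StubEvent("$abc:example.com")}
    auth_chain.update({StubEvent("$abc:example.com")})
    assert len(auth_chain) == 2  # not deduplicated by event_id
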
--- synapse/handlers/federation.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 128926e719..6793f9b6c6 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -341,14 +341,23 @@ class FederationHandler(BaseHandler): ) with logcontext.nested_logging_context(p): - state, got_auth_chain = ( + # XXX if any of the missing prevs share missing state or auth + # events, we'll end up requesting those missing events for + # *each* missing prev, contributing to the hammering of /event + # as per https://github.com/matrix-org/synapse/issues/2164. + remote_state, got_auth_chain = ( yield self.federation_client.get_state_for_room( origin, room_id, p, ) ) + + # XXX hrm I'm not convinced that duplicate events will compare + # for equality, so I'm not sure this does what the author + # hoped. auth_chains.update(got_auth_chain) + state_group = { - (x.type, x.state_key): x.event_id for x in state + (x.type, x.state_key): x.event_id for x in remote_state } state_groups.append(state_group) -- cgit 1.5.1 From 28223841e05a77a44ec2c0b29d1e930c68974913 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 19:17:36 +0100 Subject: more comments --- synapse/federation/federation_client.py | 2 -- synapse/handlers/federation.py | 7 +++---- 2 files changed, 3 insertions(+), 6 deletions(-) (limited to 'synapse') diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 8bf1ad0c1f..d05ed91d64 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -209,8 +209,6 @@ class FederationClient(FederationBase): Will attempt to get the PDU from each destination in the list until one succeeds. - This will persist the PDU locally upon receipt. - Args: destinations (list): Which home servers to query event_id (str): event to fetch diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 6793f9b6c6..38bebbf598 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -341,10 +341,9 @@ class FederationHandler(BaseHandler): ) with logcontext.nested_logging_context(p): - # XXX if any of the missing prevs share missing state or auth - # events, we'll end up requesting those missing events for - # *each* missing prev, contributing to the hammering of /event - # as per https://github.com/matrix-org/synapse/issues/2164. + # note that if any of the missing prevs share missing state or + # auth events, the requests to fetch those events are deduped + # by the get_pdu_cache in federation_client. remote_state, got_auth_chain = ( yield self.federation_client.get_state_for_room( origin, room_id, p, -- cgit 1.5.1 From a215b698c43f4cfe8a2fdb9c160c8ecd1c1297c5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 09:52:56 +0100 Subject: Fix "unhashable type: 'list'" exception in federation handling get_state_groups returns a map from state_group_id to a list of FrozenEvents, so was very much the wrong thing to be putting as one of the entries in the list passed to resolve_events_with_factory (which expects maps from (event_type, state_key) to event id). We actually want get_state_groups_ids().values() rather than get_state_groups(). This fixes the main problem in #3923, but there are other problems with this bit of code which get discovered once you do so. 
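To make the shape mismatch concrete, here is an illustration (all IDs and values are invented placeholders) of the two data structures being confused:

    # get_state_groups() returns a map from state_group_id to a *list* of
    # FrozenEvents; lists are unhashable, which is the likely source of the
    # exception named above when one of these maps reached the resolver:
    state_groups = {42: ["<FrozenEvent $a>", "<FrozenEvent $b>"]}

    # resolve_events_with_factory() instead expects a list of maps from
    # (event_type, state_key) to event_id -- i.e. the *values* of
    # get_state_groups_ids():
    state_maps = [
        {
            ("m.room.member", "@alice:example.org"): "$member_event_id",
            ("m.room.name", ""): "$name_event_id",
        },
    ]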
--- synapse/handlers/federation.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 38bebbf598..2d6b8edec4 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -106,7 +106,7 @@ class FederationHandler(BaseHandler): self.hs = hs - self.store = hs.get_datastore() + self.store = hs.get_datastore() # type: synapse.storage.DataStore self.federation_client = hs.get_federation_client() self.state_handler = hs.get_state_handler() self.server_name = hs.hostname @@ -325,12 +325,17 @@ class FederationHandler(BaseHandler): # Calculate the state of the previous events, and # de-conflict them to find the current state. - state_groups = [] auth_chains = set() try: # Get the state of the events we know about - ours = yield self.store.get_state_groups(room_id, list(seen)) - state_groups.append(ours) + ours = yield self.store.get_state_groups_ids(room_id, seen) + + # state_maps is a list of mappings from (type, state_key) to event_id + # type: list[dict[tuple[str, str], str]] + state_maps = list(ours.values()) + + # we don't need this any more, let's delete it. + del ours # Ask the remote server for the states we don't # know about @@ -355,10 +360,10 @@ class FederationHandler(BaseHandler): # hoped. auth_chains.update(got_auth_chain) - state_group = { + remote_state_map = { (x.type, x.state_key): x.event_id for x in remote_state } - state_groups.append(state_group) + state_maps.append(remote_state_map) # Resolve any conflicting state def fetch(ev_ids): @@ -368,7 +373,7 @@ class FederationHandler(BaseHandler): room_version = yield self.store.get_room_version(room_id) state_map = yield resolve_events_with_factory( - room_version, state_groups, {event_id: pdu}, fetch + room_version, state_maps, {event_id: pdu}, fetch, ) state = (yield self.store.get_events(state_map.values())).values() -- cgit 1.5.1 From bd61c82bdf97460a33080bcb6b2c836616a3b415 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 12:16:13 +0100 Subject: Include state from remote servers in pdu handling If we've fetched state events from remote servers in order to resolve the state for a new event, we need to actually pass those events into resolve_events_with_factory (so that it can do the state res) and then persist the ones we need - otherwise other bits of the codebase get confused about why we have state groups pointing to non-existent events. --- synapse/handlers/federation.py | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 2d6b8edec4..cdad565d04 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -326,6 +326,9 @@ class FederationHandler(BaseHandler): # Calculate the state of the previous events, and # de-conflict them to find the current state. 
auth_chains = set() + event_map = { + event_id: pdu, + } try: # Get the state of the events we know about ours = yield self.store.get_state_groups_ids(room_id, seen) @@ -365,18 +368,30 @@ class FederationHandler(BaseHandler): } state_maps.append(remote_state_map) + for x in remote_state: + event_map[x.event_id] = x + # Resolve any conflicting state + @defer.inlineCallbacks def fetch(ev_ids): - return self.store.get_events( - ev_ids, get_prev_content=False, check_redacted=False + fetched = yield self.store.get_events( + ev_ids, get_prev_content=False, check_redacted=False, ) + # add any events we fetch here to the `event_map` so that we + # can use them to build the state event list below. + event_map.update(fetched) + defer.returnValue(fetched) room_version = yield self.store.get_room_version(room_id) state_map = yield resolve_events_with_factory( - room_version, state_maps, {event_id: pdu}, fetch, + room_version, state_maps, event_map, fetch, ) - state = (yield self.store.get_events(state_map.values())).values() + # we need to give _process_received_pdu the actual state events + # rather than event ids, so generate that now. + state = [ + event_map[e] for e in six.itervalues(state_map) + ] auth_chain = list(auth_chains) except Exception: logger.warn( -- cgit 1.5.1 From 333bee27f53916bf5354a39a79aa468967730326 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 26 Sep 2018 19:49:59 +0100 Subject: Include event when resolving state for missing prevs If we have a forward extremity for a room as `E`, and you receive `A`, `B`, s.t. `A -> B -> E`, and `B` also points to an unknown event `X`, then we need to do state res between `X` and `E`. When that happens, we need to make sure we include `X` in the state that goes into the state res alg. Fixes #3934. --- synapse/handlers/federation.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index cdad565d04..d05b63673f 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -323,8 +323,8 @@ class FederationHandler(BaseHandler): affected=pdu.event_id, ) - # Calculate the state of the previous events, and - # de-conflict them to find the current state. + # Calculate the state after each of the previous events, and + # resolve them to find the correct state at the current event. auth_chains = set() event_map = { event_id: pdu, @@ -358,6 +358,20 @@ class FederationHandler(BaseHandler): ) ) + # we want the state *after* p; get_state_for_room returns the + # state *before* p. + remote_event = yield self.federation_client.get_pdu( + [origin], p, outlier=True, + ) + + if remote_event is None: + raise Exception( + "Unable to get missing prev_event %s" % (p, ) + ) + + if remote_event.is_state(): + remote_state.append(remote_event) + # XXX hrm I'm not convinced that duplicate events will compare # for equality, so I'm not sure this does what the author # hoped. 
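To picture the DAG described in the commit message above, a small self-contained sketch (event IDs invented):

    # Forward extremity E; we receive A and B, where A -> B -> E and B
    # also points at an event X we have never seen.
    received = {
        "$A": {"prev_events": ["$B"]},
        "$B": {"prev_events": ["$E", "$X"]},
    }
    known = {"$E"}  # our existing forward extremity

    missing_prevs = {
        prev
        for ev in received.values()
        for prev in ev["prev_events"]
    } - known - set(received)

    print(missing_prevs)  # {'$X'} -- X must be fetched and, if it is a
                          # state event, included in the state handed to
                          # the state resolution against E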
-- cgit 1.5.1 From dc5db01ff25dac6ec74ceea9b4d815a8c43cd7dd Mon Sep 17 00:00:00 2001 From: Schnuffle Date: Thu, 27 Sep 2018 13:38:50 +0200 Subject: Replaced all occurrences of e.message with str(e) Signed-off-by: Schnuffle --- scripts-dev/dump_macaroon.py | 2 +- synapse/api/filtering.py | 2 +- synapse/app/__init__.py | 2 +- synapse/app/appservice.py | 2 +- synapse/app/client_reader.py | 2 +- synapse/app/event_creator.py | 2 +- synapse/app/federation_reader.py | 2 +- synapse/app/federation_sender.py | 2 +- synapse/app/frontend_proxy.py | 2 +- synapse/app/homeserver.py | 4 ++-- synapse/app/media_repository.py | 2 +- synapse/app/pusher.py | 2 +- synapse/app/synchrotron.py | 2 +- synapse/app/user_dir.py | 2 +- synapse/config/__main__.py | 2 +- synapse/handlers/e2e_keys.py | 2 +- synapse/handlers/profile.py | 2 +- tests/unittest.py | 2 +- 18 files changed, 19 insertions(+), 19 deletions(-) (limited to 'synapse') diff --git a/scripts-dev/dump_macaroon.py b/scripts-dev/dump_macaroon.py index 6e45be75d6..fcc5568835 100755 --- a/scripts-dev/dump_macaroon.py +++ b/scripts-dev/dump_macaroon.py @@ -21,4 +21,4 @@ try: verifier.verify(macaroon, key) print "Signature is correct" except Exception as e: - print e.message + print str(e) diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index a31a9a17e0..eed8c67e6a 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -226,7 +226,7 @@ class Filtering(object): jsonschema.validate(user_filter_json, USER_FILTER_SCHEMA, format_checker=FormatChecker()) except jsonschema.ValidationError as e: - raise SynapseError(400, e.message) + raise SynapseError(400, str(e)) class FilterCollection(object): diff --git a/synapse/app/__init__.py b/synapse/app/__init__.py index 3b6b9368b8..c3afcc573b 100644 --- a/synapse/app/__init__.py +++ b/synapse/app/__init__.py @@ -24,7 +24,7 @@ try: python_dependencies.check_requirements() except python_dependencies.MissingRequirementError as e: message = "\n".join([ - "Missing Requirement: %s" % (e.message,), + "Missing Requirement: %s" % (str(e),), "To install run:", " pip install --upgrade --force \"%s\"" % (e.dependency,), "", diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index 02039f7e79..8559e141af 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -136,7 +136,7 @@ def start(config_options): "Synapse appservice", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.appservice" diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 4c73c637bb..76aed8c60a 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -153,7 +153,7 @@ def start(config_options): "Synapse client reader", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.client_reader" diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index bc82197b2a..9060ab14f6 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -169,7 +169,7 @@ def start(config_options): "Synapse event creator", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.event_creator" diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index
18ca71ef99..228a297fb8 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -140,7 +140,7 @@ def start(config_options): "Synapse federation reader", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.federation_reader" diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index 6501c57792..e9a99d76e1 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -160,7 +160,7 @@ def start(config_options): "Synapse federation sender", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.federation_sender" diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index b076fbe522..fc4b25de1c 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -228,7 +228,7 @@ def start(config_options): "Synapse frontend proxy", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.frontend_proxy" diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 8c5d858b0b..a98fdbd210 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -301,7 +301,7 @@ class SynapseHomeServer(HomeServer): try: database_engine.check_database(db_conn.cursor()) except IncorrectDatabaseSetup as e: - quit_with_error(e.message) + quit_with_error(str(e)) # Gauges to expose monthly active user control metrics @@ -328,7 +328,7 @@ def setup(config_options): config_options, ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) if not config: diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index 992d182dba..acc0487adc 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -133,7 +133,7 @@ def start(config_options): "Synapse media repository", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.media_repository" diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 2ec4c7defb..630dcda478 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -191,7 +191,7 @@ def start(config_options): "Synapse pusher", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.pusher" diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index df81b7bcbe..9a7fc6ee9d 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -410,7 +410,7 @@ def start(config_options): "Synapse synchrotron", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.synchrotron" diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index b383e79c1c..0a5f62b509 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -188,7 +188,7 @@ def start(config_options): "Synapse user directory", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + 
sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.user_dir" diff --git a/synapse/config/__main__.py b/synapse/config/__main__.py index 58c97a70af..8fccf573ee 100644 --- a/synapse/config/__main__.py +++ b/synapse/config/__main__.py @@ -25,7 +25,7 @@ if __name__ == "__main__": try: config = HomeServerConfig.load_config("", sys.argv[3:]) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) print (getattr(config, key)) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 578e9250fb..9dc46aa15f 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -341,7 +341,7 @@ class E2eKeysHandler(object): def _exception_to_failure(e): if isinstance(e, CodeMessageException): return { - "status": e.code, "message": e.message, + "status": e.code, "message": str(e), } if isinstance(e, NotRetryingDestination): diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 75b8b7ce6a..f284d5a385 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -278,7 +278,7 @@ class BaseProfileHandler(BaseHandler): except Exception as e: logger.warn( "Failed to update join event for room %s - %s", - room_id, str(e.message) + room_id, str(e) ) diff --git a/tests/unittest.py b/tests/unittest.py index ef905e6389..043710afaf 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -121,7 +121,7 @@ class TestCase(unittest.TestCase): try: self.assertEquals(attrs[key], getattr(obj, key)) except AssertionError as e: - raise (type(e))(e.message + " for '.%s'" % key) + raise (type(e))(str(e) + " for '.%s'" % key) def assert_dict(self, required, actual): """Does a partial assert of a dict. -- cgit 1.5.1 From b3064532d005e08a3f5ec0c2a2decf688208f5e6 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 27 Sep 2018 23:21:54 +1000 Subject: Run our oldest supported configuration in CI (#3952) --- .travis.yml | 3 +++ changelog.d/3952.misc | 1 + synapse/python_dependencies.py | 27 ++++++++++++++------------- tox.ini | 20 ++++++++++++++++++++ 4 files changed, 38 insertions(+), 13 deletions(-) create mode 100644 changelog.d/3952.misc (limited to 'synapse') diff --git a/.travis.yml b/.travis.yml index b6faca4b92..2077f6af72 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,6 +20,9 @@ matrix: - python: 2.7 env: TOX_ENV=py27 + - python: 2.7 + env: TOX_ENV=py27-old + - python: 2.7 env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4" services: diff --git a/changelog.d/3952.misc b/changelog.d/3952.misc new file mode 100644 index 0000000000..015e4a43e6 --- /dev/null +++ b/changelog.d/3952.misc @@ -0,0 +1 @@ +Run the test suite on the oldest supported versions of our dependencies in CI. 
\ No newline at end of file diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index c779f69fa0..0f339a0320 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -33,31 +33,32 @@ logger = logging.getLogger(__name__) # [2] https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-dependencies REQUIREMENTS = { "jsonschema>=2.5.1": ["jsonschema>=2.5.1"], - "frozendict>=0.4": ["frozendict"], + "frozendict>=1": ["frozendict"], "unpaddedbase64>=1.1.0": ["unpaddedbase64>=1.1.0"], "canonicaljson>=1.1.3": ["canonicaljson>=1.1.3"], "signedjson>=1.0.0": ["signedjson>=1.0.0"], "pynacl>=1.2.1": ["nacl>=1.2.1", "nacl.bindings"], - "service_identity>=1.0.0": ["service_identity>=1.0.0"], + "service_identity>=16.0.0": ["service_identity>=16.0.0"], "Twisted>=17.1.0": ["twisted>=17.1.0"], "treq>=15.1": ["treq>=15.1"], # Twisted has required pyopenssl 16.0 since about Twisted 16.6. "pyopenssl>=16.0.0": ["OpenSSL>=16.0.0"], - "pyyaml": ["yaml"], - "pyasn1": ["pyasn1"], - "daemonize": ["daemonize"], - "bcrypt": ["bcrypt>=3.1.0"], - "pillow": ["PIL"], - "pydenticon": ["pydenticon"], - "sortedcontainers": ["sortedcontainers"], - "pysaml2>=3.0.0": ["saml2>=3.0.0"], - "pymacaroons-pynacl": ["pymacaroons"], + "pyyaml>=3.11": ["yaml"], + "pyasn1>=0.1.9": ["pyasn1"], + "pyasn1-modules>=0.0.7": ["pyasn1_modules"], + "daemonize>=2.3.1": ["daemonize"], + "bcrypt>=3.1.0": ["bcrypt>=3.1.0"], + "pillow>=3.1.2": ["PIL"], + "pydenticon>=0.2": ["pydenticon"], + "sortedcontainers>=1.4.4": ["sortedcontainers"], + "pysaml2>=3.0.0": ["saml2"], + "pymacaroons-pynacl>=0.9.3": ["pymacaroons"], "msgpack-python>=0.3.0": ["msgpack"], "phonenumbers>=8.2.0": ["phonenumbers"], - "six": ["six"], - "prometheus_client": ["prometheus_client"], + "six>=1.10": ["six"], + "prometheus_client>=0.0.18": ["prometheus_client"], # we use attr.s(slots), which arrived in 16.0.0 "attrs>=16.0.0": ["attr>=16.0.0"], diff --git a/tox.ini b/tox.ini index e4db563b4b..87b5e4782d 100644 --- a/tox.ini +++ b/tox.ini @@ -64,6 +64,26 @@ setenv = {[base]setenv} SYNAPSE_POSTGRES = 1 +# A test suite for the oldest supported versions of Python libraries, to catch +# any uses of APIs not available in them. +[testenv:py27-old] +skip_install=True +deps = + # Old automat version for Twisted + Automat == 0.3.0 + + mock + lxml +commands = + /usr/bin/find "{toxinidir}" -name '*.pyc' -delete + # Make all greater-thans equals so we test the oldest version of our direct + # dependencies, but make the pyopenssl 17.0, which can work against an + # OpenSSL 1.1 compiled cryptography (as older ones don't compile on Travis). + /bin/sh -c 'python -m synapse.python_dependencies | sed -e "s/>=/==/g" -e "s/psycopg2==2.6//" -e "s/pyopenssl==16.0.0/pyopenssl==17.0.0/" | xargs pip install' + # Install Synapse itself. This won't update any libraries. + pip install -e . 
+ {envbindir}/trial {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:} + [testenv:py35] usedevelop=true -- cgit 1.5.1 From 07340cdacad901a55abd0811a1fca86061b752bd Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Fri, 28 Sep 2018 01:42:53 +0100 Subject: untested stab at autocreating autojoin rooms --- synapse/config/registration.py | 4 ++++ synapse/handlers/register.py | 18 ++++++++++++++++++ 2 files changed, 22 insertions(+) (limited to 'synapse') diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 0fb964eb67..686c7fa9f7 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -44,6 +44,7 @@ class RegistrationConfig(Config): ) self.auto_join_rooms = config.get("auto_join_rooms", []) + self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", true) def default_config(self, **kwargs): registration_shared_secret = random_string_with_symbols(50) @@ -98,6 +99,9 @@ class RegistrationConfig(Config): # to these rooms #auto_join_rooms: # - "#example:example.com" + + # Have first user on server autocreate autojoin rooms + autocreate_auto_join_rooms: true """ % locals() def add_arguments(self, parser): diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index da914c46ff..0e5337d26c 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -50,6 +50,8 @@ class RegistrationHandler(BaseHandler): self._auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() + self._room_creation_handler = hs.get_room_creation_handler() + self._directory_handler = hs.get_handlers().directory_handler self.captcha_client = CaptchaServerHttpClient(hs) self._next_generated_user_id = None @@ -513,6 +515,22 @@ class RegistrationHandler(BaseHandler): @defer.inlineCallbacks def _join_user_to_room(self, requester, room_identifier): + + # try to create the room if we're the first user on the server + if self.config.autocreate_auto_join_rooms: + count = yield self.store.count_all_users() + if count == 1 and RoomAlias.is_valid(room_identifier): + info = yield self._room_creation_handler.create_room( + requester, + config={ + "preset": "public_chat", + }, + ratelimit=False, + ) + room_id = info["room_id"] + + yield create_association(self, requester.user, room_identifier, room_id) + room_id = None room_member_handler = self.hs.get_room_member_handler() if RoomID.is_valid(room_identifier): -- cgit 1.5.1 From 19475cf3371819170737515c414e3cc7c2a32d43 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 28 Sep 2018 11:55:57 +0100 Subject: Remove redundant call to start_get_pdu_cache I think this got forgotten in #3932. We were getting away with it because it was the last call in this function. 
--- changelog.d/3980.bugfix | 1 + synapse/app/homeserver.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/3980.bugfix (limited to 'synapse') diff --git a/changelog.d/3980.bugfix b/changelog.d/3980.bugfix new file mode 100644 index 0000000000..7578414ede --- /dev/null +++ b/changelog.d/3980.bugfix @@ -0,0 +1 @@ +Fix some instances of ExpiringCache not expiring cache items diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index a98fdbd210..e3f0d99a3f 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -386,7 +386,6 @@ def setup(config_options): hs.get_pusherpool().start() hs.get_datastore().start_profiling() hs.get_datastore().start_doing_background_updates() - hs.get_federation_client().start_get_pdu_cache() reactor.callWhenRunning(start) -- cgit 1.5.1 From 965154d60af59b69eac01f7cfcf821a757ae93fa Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 28 Sep 2018 12:45:54 +0100 Subject: Fix complete fail to do the right thing --- synapse/federation/transaction_queue.py | 3 ++- synapse/handlers/typing.py | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index ae47aaae0b..98b5950800 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -354,7 +354,7 @@ class TransactionQueue(object): content=content, ) - if not destination == self.server_name: + if destination == self.server_name: logger.info("Not sending EDU to ourselves") return @@ -372,6 +372,7 @@ class TransactionQueue(object): def send_device_messages(self, destination): if destination == self.server_name: logger.info("Not sending device update to ourselves") + return self._attempt_new_transaction(destination) diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 2d2d3d5a0d..bf82b3f864 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -218,6 +218,7 @@ class TypingHandler(object): for domain in set(get_domain_from_id(u) for u in users): if domain != self.server_name: + logger.debug("sending typing update to %s", domain) self.federation.send_edu( destination=domain, edu_type="m.typing", -- cgit 1.5.1 From 8f646f2d04e7d21aa0570826bd78a0bbc3bb706b Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Fri, 28 Sep 2018 15:37:28 +0100 Subject: fix UTs --- synapse/config/registration.py | 2 +- synapse/handlers/register.py | 13 +++++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) (limited to 'synapse') diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 686c7fa9f7..dcf2374ed2 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -44,7 +44,7 @@ class RegistrationConfig(Config): ) self.auto_join_rooms = config.get("auto_join_rooms", []) - self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", true) + self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True) def default_config(self, **kwargs): registration_shared_secret = random_string_with_symbols(50) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 0e5337d26c..a358bfc723 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -50,8 +50,6 @@ class RegistrationHandler(BaseHandler): self._auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() - 
self._room_creation_handler = hs.get_room_creation_handler() - self._directory_handler = hs.get_handlers().directory_handler self.captcha_client = CaptchaServerHttpClient(hs) self._next_generated_user_id = None @@ -520,7 +518,8 @@ class RegistrationHandler(BaseHandler): if self.config.autocreate_auto_join_rooms: count = yield self.store.count_all_users() if count == 1 and RoomAlias.is_valid(room_identifier): - info = yield self._room_creation_handler.create_room( + room_creation_handler = hs.get_room_creation_handler() + info = yield room_creation_handler.create_room( requester, config={ "preset": "public_chat", @@ -529,7 +528,13 @@ class RegistrationHandler(BaseHandler): ) room_id = info["room_id"] - yield create_association(self, requester.user, room_identifier, room_id) + directory_handler = hs.get_handlers().directory_handler + yield directory_handler.create_association( + self, + requester.user, + room_identifier, + room_id + ) room_id = None room_member_handler = self.hs.get_room_member_handler() -- cgit 1.5.1 From 5b68f29f48acfa45e671af1fb325d0c0d532f3d6 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 29 Sep 2018 02:14:40 +0100 Subject: fix thinkos --- synapse/handlers/register.py | 12 ++++++------ synapse/storage/directory.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index a358bfc723..05e8f4ea73 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -515,10 +515,10 @@ class RegistrationHandler(BaseHandler): def _join_user_to_room(self, requester, room_identifier): # try to create the room if we're the first user on the server - if self.config.autocreate_auto_join_rooms: + if self.hs.config.autocreate_auto_join_rooms: count = yield self.store.count_all_users() if count == 1 and RoomAlias.is_valid(room_identifier): - room_creation_handler = hs.get_room_creation_handler() + room_creation_handler = self.hs.get_room_creation_handler() info = yield room_creation_handler.create_room( requester, config={ @@ -528,11 +528,11 @@ class RegistrationHandler(BaseHandler): ) room_id = info["room_id"] - directory_handler = hs.get_handlers().directory_handler + directory_handler = self.hs.get_handlers().directory_handler + room_alias = RoomAlias.from_string(room_identifier) yield directory_handler.create_association( - self, - requester.user, - room_identifier, + requester.user.to_string(), + room_alias, room_id ) diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py index cfb687cb53..61a029a53c 100644 --- a/synapse/storage/directory.py +++ b/synapse/storage/directory.py @@ -90,7 +90,7 @@ class DirectoryWorkerStore(SQLBaseStore): class DirectoryStore(DirectoryWorkerStore): @defer.inlineCallbacks def create_room_alias_association(self, room_alias, room_id, servers, creator=None): - """ Creates an associatin between a room alias and room_id/servers + """ Creates an association between a room alias and room_id/servers Args: room_alias (RoomAlias) -- cgit 1.5.1 From 23b6a0537f9e2b46a5857a7c7bd0a03d04d4095d Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 29 Sep 2018 02:19:37 +0100 Subject: emit room aliases event --- synapse/handlers/register.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 05e8f4ea73..7584064d5d 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -531,9 +531,14 @@ class 
RegistrationHandler(BaseHandler): directory_handler = self.hs.get_handlers().directory_handler room_alias = RoomAlias.from_string(room_identifier) yield directory_handler.create_association( - requester.user.to_string(), - room_alias, - room_id + user_id=requester.user.to_string(), + room_alias=room_alias, + room_id=room_id, + servers=[self.hs.hostname], + ) + + yield directory_handler.send_room_alias_update_event( + requester, requester.user.to_string(), room_id ) room_id = None -- cgit 1.5.1 From 8ea887856c1bcb97c90d88fe20f7f82ed7f48967 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 1 Oct 2018 11:59:52 +0100 Subject: Don't update eviction metrics on explicit removal --- synapse/util/caches/expiringcache.py | 5 ----- 1 file changed, 5 deletions(-) (limited to 'synapse') diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 8ac56d85ff..f2f55ba6c9 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -103,11 +103,6 @@ class ExpiringCache(object): if value is SENTINEL: return default - if self.iterable: - self.metrics.inc_evictions(len(value.value)) - else: - self.metrics.inc_evictions() - return value def __contains__(self, key): -- cgit 1.5.1 From 4f3e3ac192f2c5ff37198f50af99bc8fbd57beca Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 1 Oct 2018 12:25:27 +0100 Subject: Correctly match 'dict.pop' api --- synapse/util/caches/expiringcache.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index f2f55ba6c9..f369780277 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -98,10 +98,18 @@ class ExpiringCache(object): return entry.value - def pop(self, key, default=None): - value = self._cache.pop(key, SENTINEL) + def pop(self, key, default=SENTINEL): + """Removes and returns the value with the given key from the cache. + + If the key isn't in the cache then `default` will be returned if + specified, otherwise `KeyError` will get raised. + + Identical functionality to `dict.pop(..)`. + """ + + value = self._cache.pop(key, default) if value is SENTINEL: - return default + raise KeyError(key) return value -- cgit 1.5.1 From 82f922b4af8d41e15484e1913775d234c548d9f2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 1 Oct 2018 14:19:36 +0100 Subject: Fix lazy loaded sync with rejected state events In particular, we assume that the name and canonical alias events in the state have not been rejected. In practice this may not be the case (though we should probably think about fixing that) so lets ensure that we gracefully handle that case, rather than 404'ing the sync request like we do now. --- synapse/handlers/sync.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index c7d69d9d80..67b8ca28c7 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -567,13 +567,13 @@ class SyncHandler(object): # be a valid name or canonical_alias - i.e. we're checking that they # haven't been "deleted" by blatting {} over the top. 
if name_id: - name = yield self.store.get_event(name_id, allow_none=False) + name = yield self.store.get_event(name_id, allow_none=True) if name and name.content: defer.returnValue(summary) if canonical_alias_id: canonical_alias = yield self.store.get_event( - canonical_alias_id, allow_none=False, + canonical_alias_id, allow_none=True, ) if canonical_alias and canonical_alias.content: defer.returnValue(summary) -- cgit 1.5.1 From 6e05fd032c670080f9e9f99f2e2e8b8eccf24c7d Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 2 Oct 2018 00:11:58 +1000 Subject: Fix userconsent on Python 3 (#3938) --- changelog.d/3938.bugfix | 1 + synapse/api/urls.py | 2 +- tests/server_notices/test_consent.py | 100 +++++++++++++++++++++++++++++++++ tests/storage/test_client_ips.py | 48 +--------------- tests/unittest.py | 80 +++++++++++++++++++++++++- tests/utils.py | 105 +++++++++++++++++++---------------- 6 files changed, 240 insertions(+), 96 deletions(-) create mode 100644 changelog.d/3938.bugfix create mode 100644 tests/server_notices/test_consent.py (limited to 'synapse') diff --git a/changelog.d/3938.bugfix b/changelog.d/3938.bugfix new file mode 100644 index 0000000000..01ccca21a7 --- /dev/null +++ b/changelog.d/3938.bugfix @@ -0,0 +1 @@ +Sending server notices regarding user consent now works on Python 3. diff --git a/synapse/api/urls.py b/synapse/api/urls.py index 71347912f1..6d9f1ca0ef 100644 --- a/synapse/api/urls.py +++ b/synapse/api/urls.py @@ -64,7 +64,7 @@ class ConsentURIBuilder(object): """ mac = hmac.new( key=self._hmac_secret, - msg=user_id, + msg=user_id.encode('ascii'), digestmod=sha256, ).hexdigest() consent_uri = "%s_matrix/consent?%s" % ( diff --git a/tests/server_notices/test_consent.py b/tests/server_notices/test_consent.py new file mode 100644 index 0000000000..95badc985e --- /dev/null +++ b/tests/server_notices/test_consent.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from synapse.rest.client.v1 import admin, login, room +from synapse.rest.client.v2_alpha import sync + +from tests import unittest + + +class ConsentNoticesTests(unittest.HomeserverTestCase): + + servlets = [ + sync.register_servlets, + admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + + self.consent_notice_message = "consent %(consent_uri)s" + config = self.default_config() + config.user_consent_version = "1" + config.user_consent_server_notice_content = { + "msgtype": "m.text", + "body": self.consent_notice_message, + } + config.public_baseurl = "https://example.com/" + config.form_secret = "123abc" + + config.server_notices_mxid = "@notices:test" + config.server_notices_mxid_display_name = "test display name" + config.server_notices_mxid_avatar_url = None + config.server_notices_room_name = "Server Notices" + + hs = self.setup_test_homeserver(config=config) + + return hs + + def prepare(self, reactor, clock, hs): + self.user_id = self.register_user("bob", "abc123") + self.access_token = self.login("bob", "abc123") + + def test_get_sync_message(self): + """ + When user consent server notices are enabled, a sync will cause a notice + to fire (in a room which the user is invited to). The notice contains + the notice URL + an authentication code. + """ + # Initial sync, to get the user consent room invite + request, channel = self.make_request( + "GET", "/_matrix/client/r0/sync", access_token=self.access_token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # Get the Room ID to join + room_id = list(channel.json_body["rooms"]["invite"].keys())[0] + + # Join the room + request, channel = self.make_request( + "POST", + "/_matrix/client/r0/rooms/" + room_id + "/join", + access_token=self.access_token, + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # Sync again, to get the message in the room + request, channel = self.make_request( + "GET", "/_matrix/client/r0/sync", access_token=self.access_token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # Get the message + room = channel.json_body["rooms"]["join"][room_id] + messages = [ + x for x in room["timeline"]["events"] if x["type"] == "m.room.message" + ] + + # One message, with the consent URL + self.assertEqual(len(messages), 1) + self.assertTrue( + messages[0]["content"]["body"].startswith( + "consent https://example.com/_matrix/consent" + ) + ) diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index 2ffbb9f14f..4577e9422b 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -14,10 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import hashlib -import hmac -import json - from mock import Mock from twisted.internet import defer @@ -145,34 +141,8 @@ class ClientIpAuthTestCase(unittest.HomeserverTestCase): return hs def prepare(self, hs, reactor, clock): - self.hs.config.registration_shared_secret = u"shared" self.store = self.hs.get_datastore() - - # Create the user - request, channel = self.make_request("GET", "/_matrix/client/r0/admin/register") - self.render(request) - nonce = channel.json_body["nonce"] - - want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) - want_mac.update(nonce.encode('ascii') + b"\x00bob\x00abc123\x00admin") - want_mac = want_mac.hexdigest() - - body = json.dumps( - { - "nonce": nonce, - "username": "bob", - "password": "abc123", - "admin": True, - "mac": want_mac, - } - ) - request, channel = self.make_request( - "POST", "/_matrix/client/r0/admin/register", body.encode('utf8') - ) - self.render(request) - - self.assertEqual(channel.code, 200) - self.user_id = channel.json_body["user_id"] + self.user_id = self.register_user("bob", "abc123", True) def test_request_with_xforwarded(self): """ @@ -194,20 +164,7 @@ class ClientIpAuthTestCase(unittest.HomeserverTestCase): def _runtest(self, headers, expected_ip, make_request_args): device_id = "bleb" - body = json.dumps( - { - "type": "m.login.password", - "user": "bob", - "password": "abc123", - "device_id": device_id, - } - ) - request, channel = self.make_request( - "POST", "/_matrix/client/r0/login", body.encode('utf8'), **make_request_args - ) - self.render(request) - self.assertEqual(channel.code, 200) - access_token = channel.json_body["access_token"].encode('ascii') + access_token = self.login("bob", "abc123", device_id=device_id) # Advance to a known time self.reactor.advance(123456 - self.reactor.seconds()) @@ -215,7 +172,6 @@ class ClientIpAuthTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "/_matrix/client/r0/admin/users/" + self.user_id, - body.encode('utf8'), access_token=access_token, **make_request_args ) diff --git a/tests/unittest.py b/tests/unittest.py index 043710afaf..a59291cc60 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import hashlib +import hmac import logging from mock import Mock @@ -32,6 +34,7 @@ from synapse.types import UserID, create_requester from synapse.util.logcontext import LoggingContextFilter from tests.server import get_clock, make_request, render, setup_test_homeserver +from tests.utils import default_config # Set up putting Synapse's logs into Trial's. rootLogger = logging.getLogger() @@ -121,7 +124,7 @@ class TestCase(unittest.TestCase): try: self.assertEquals(attrs[key], getattr(obj, key)) except AssertionError as e: - raise (type(e))(str(e) + " for '.%s'" % key) + raise (type(e))(e.message + " for '.%s'" % key) def assert_dict(self, required, actual): """Does a partial assert of a dict. @@ -223,6 +226,15 @@ class HomeserverTestCase(TestCase): hs = self.setup_test_homeserver() return hs + def default_config(self, name="test"): + """ + Get a default HomeServer config object. + + Args: + name (str): The homeserver name/domain. + """ + return default_config(name) + def prepare(self, reactor, clock, homeserver): """ Prepare for the test. 
This involves things like mocking out parts of @@ -297,3 +309,69 @@ class HomeserverTestCase(TestCase): return d self.pump() return self.successResultOf(d) + + def register_user(self, username, password, admin=False): + """ + Register a user. Requires the Admin API be registered. + + Args: + username (bytes/unicode): The user part of the new user. + password (bytes/unicode): The password of the new user. + admin (bool): Whether the user should be created as an admin + or not. + + Returns: + The MXID of the new user (unicode). + """ + self.hs.config.registration_shared_secret = u"shared" + + # Create the user + request, channel = self.make_request("GET", "/_matrix/client/r0/admin/register") + self.render(request) + nonce = channel.json_body["nonce"] + + want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) + nonce_str = b"\x00".join([username.encode('utf8'), password.encode('utf8')]) + if admin: + nonce_str += b"\x00admin" + else: + nonce_str += b"\x00notadmin" + want_mac.update(nonce.encode('ascii') + b"\x00" + nonce_str) + want_mac = want_mac.hexdigest() + + body = json.dumps( + { + "nonce": nonce, + "username": username, + "password": password, + "admin": admin, + "mac": want_mac, + } + ) + request, channel = self.make_request( + "POST", "/_matrix/client/r0/admin/register", body.encode('utf8') + ) + self.render(request) + self.assertEqual(channel.code, 200) + + user_id = channel.json_body["user_id"] + return user_id + + def login(self, username, password, device_id=None): + """ + Log in a user, and get an access token. Requires the Login API be + registered. + + """ + body = {"type": "m.login.password", "user": username, "password": password} + if device_id: + body["device_id"] = device_id + + request, channel = self.make_request( + "POST", "/_matrix/client/r0/login", json.dumps(body).encode('utf8') + ) + self.render(request) + self.assertEqual(channel.code, 200) + + access_token = channel.json_body["access_token"].encode('ascii') + return access_token diff --git a/tests/utils.py b/tests/utils.py index aaed1149c3..1ef80e7b79 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -96,6 +96,62 @@ def setupdb(): atexit.register(_cleanup) +def default_config(name): + """ + Create a reasonable test config. 
+ """ + config = Mock() + config.signing_key = [MockKey()] + config.event_cache_size = 1 + config.enable_registration = True + config.macaroon_secret_key = "not even a little secret" + config.expire_access_token = False + config.server_name = name + config.trusted_third_party_id_servers = [] + config.room_invite_state_types = [] + config.password_providers = [] + config.worker_replication_url = "" + config.worker_app = None + config.email_enable_notifs = False + config.block_non_admin_invites = False + config.federation_domain_whitelist = None + config.federation_rc_reject_limit = 10 + config.federation_rc_sleep_limit = 10 + config.federation_rc_sleep_delay = 100 + config.federation_rc_concurrent = 10 + config.filter_timeline_limit = 5000 + config.user_directory_search_all_users = False + config.user_consent_server_notice_content = None + config.block_events_without_consent_error = None + config.media_storage_providers = [] + config.auto_join_rooms = [] + config.limit_usage_by_mau = False + config.hs_disabled = False + config.hs_disabled_message = "" + config.hs_disabled_limit_type = "" + config.max_mau_value = 50 + config.mau_trial_days = 0 + config.mau_limits_reserved_threepids = [] + config.admin_contact = None + config.rc_messages_per_second = 10000 + config.rc_message_burst_count = 10000 + + # we need a sane default_room_version, otherwise attempts to create rooms will + # fail. + config.default_room_version = "1" + + # disable user directory updates, because they get done in the + # background, which upsets the test runner. + config.update_user_directory = False + + def is_threepid_reserved(threepid): + return ServerConfig.is_threepid_reserved(config, threepid) + + config.is_threepid_reserved.side_effect = is_threepid_reserved + + return config + + class TestHomeServer(HomeServer): DATASTORE_CLASS = DataStore @@ -124,54 +180,7 @@ def setup_test_homeserver( from twisted.internet import reactor if config is None: - config = Mock() - config.signing_key = [MockKey()] - config.event_cache_size = 1 - config.enable_registration = True - config.macaroon_secret_key = "not even a little secret" - config.expire_access_token = False - config.server_name = name - config.trusted_third_party_id_servers = [] - config.room_invite_state_types = [] - config.password_providers = [] - config.worker_replication_url = "" - config.worker_app = None - config.email_enable_notifs = False - config.block_non_admin_invites = False - config.federation_domain_whitelist = None - config.federation_rc_reject_limit = 10 - config.federation_rc_sleep_limit = 10 - config.federation_rc_sleep_delay = 100 - config.federation_rc_concurrent = 10 - config.filter_timeline_limit = 5000 - config.user_directory_search_all_users = False - config.user_consent_server_notice_content = None - config.block_events_without_consent_error = None - config.media_storage_providers = [] - config.auto_join_rooms = [] - config.limit_usage_by_mau = False - config.hs_disabled = False - config.hs_disabled_message = "" - config.hs_disabled_limit_type = "" - config.max_mau_value = 50 - config.mau_trial_days = 0 - config.mau_limits_reserved_threepids = [] - config.admin_contact = None - config.rc_messages_per_second = 10000 - config.rc_message_burst_count = 10000 - - # we need a sane default_room_version, otherwise attempts to create rooms will - # fail. - config.default_room_version = "1" - - # disable user directory updates, because they get done in the - # background, which upsets the test runner. 
- config.update_user_directory = False - - def is_threepid_reserved(threepid): - return ServerConfig.is_threepid_reserved(config, threepid) - - config.is_threepid_reserved.side_effect = is_threepid_reserved + config = default_config(name) config.use_frozen_dicts = True config.ldap_enabled = False -- cgit 1.5.1 From 8174c6725b5271923930432d1927dd39cff3547c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 1 Oct 2018 18:48:51 +0100 Subject: Avoid reraise, to improve stacktraces --- changelog.d/3989.misc | 1 + synapse/handlers/federation.py | 20 ++++++++++---------- synapse/handlers/message.py | 25 +++++++++++++------------ 3 files changed, 24 insertions(+), 22 deletions(-) create mode 100644 changelog.d/3989.misc (limited to 'synapse') diff --git a/changelog.d/3989.misc b/changelog.d/3989.misc new file mode 100644 index 0000000000..26700d168f --- /dev/null +++ b/changelog.d/3989.misc @@ -0,0 +1 @@ +Improve stacktraces in certain exceptions in the logs diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index d05b63673f..45d955e6f5 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -18,7 +18,6 @@ import itertools import logging -import sys import six from six import iteritems, itervalues @@ -1602,6 +1601,9 @@ class FederationHandler(BaseHandler): auth_events=auth_events, ) + # reraise does not allow inlineCallbacks to preserve the stacktrace, so we + # hack around with a try/finally instead. + success = False try: if not event.internal_metadata.is_outlier() and not backfilled: yield self.action_generator.handle_push_actions_for_event( @@ -1612,15 +1614,13 @@ class FederationHandler(BaseHandler): [(event, context)], backfilled=backfilled, ) - except: # noqa: E722, as we reraise the exception this is fine. - tp, value, tb = sys.exc_info() - - logcontext.run_in_background( - self.store.remove_push_actions_from_staging, - event.event_id, - ) - - six.reraise(tp, value, tb) + success = True + finally: + if not success: + logcontext.run_in_background( + self.store.remove_push_actions_from_staging, + event.event_id, + ) defer.returnValue(context) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index e484061cc0..4954b23a0d 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -14,9 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -import sys -import six from six import iteritems, itervalues, string_types from canonicaljson import encode_canonical_json, json @@ -624,6 +622,9 @@ class EventCreationHandler(object): event, context ) + # reraise does not allow inlineCallbacks to preserve the stacktrace, so we + # hack around with a try/finally instead. + success = False try: # If we're a worker we need to hit out to the master. if self.config.worker_app: @@ -636,6 +637,7 @@ class EventCreationHandler(object): ratelimit=ratelimit, extra_users=extra_users, ) + success = True return yield self.persist_and_notify_client_event( @@ -645,17 +647,16 @@ class EventCreationHandler(object): ratelimit=ratelimit, extra_users=extra_users, ) - except: # noqa: E722, as we reraise the exception this is fine. - # Ensure that we actually remove the entries in the push actions - # staging area, if we calculated them. 
- tp, value, tb = sys.exc_info() - - run_in_background( - self.store.remove_push_actions_from_staging, - event.event_id, - ) - - six.reraise(tp, value, tb) + success = True + finally: + if not success: + # Ensure that we actually remove the entries in the push actions + # staging area, if we calculated them. + run_in_background( + self.store.remove_push_actions_from_staging, + event.event_id, + ) @defer.inlineCallbacks def persist_and_notify_client_event( -- cgit 1.5.1 From 334e075dd85a21e859eb89948b606c2710b932ad Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Oct 2018 11:49:39 +0100 Subject: Fix error when logging incomplete requests If a connection is lost before a request is read from Request, Twisted sets `method` (and `uri`) attributes to dummy values. These dummy values have incorrect types (i.e. they're not bytes), and so things like `__repr__` would raise an exception. To fix this we add a helper method to return the method with a consistent type. --- synapse/http/site.py | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) (limited to 'synapse') diff --git a/synapse/http/site.py b/synapse/http/site.py index 50be2de3bb..e508c0bd4f 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -75,14 +75,14 @@ class SynapseRequest(Request): return '<%s at 0x%x method=%r uri=%r clientproto=%r site=%r>' % ( self.__class__.__name__, id(self), - self.method.decode('ascii', errors='replace'), + self.get_method(), self.get_redacted_uri(), self.clientproto.decode('ascii', errors='replace'), self.site.site_tag, ) def get_request_id(self): - return "%s-%i" % (self.method.decode('ascii'), self.request_seq) + return "%s-%i" % (self.get_method(), self.request_seq) def get_redacted_uri(self): uri = self.uri @@ -90,6 +90,21 @@ class SynapseRequest(Request): uri = self.uri.decode('ascii') return redact_uri(uri) + def get_method(self): + """Gets the method associated with the request (or placeholder if no + method has yet been received). + + Note: This is necessary as the placeholder value in twisted is str + rather than bytes, so we need to sanitise `self.method`.
+ + Returns: + str + """ + method = self.method + if isinstance(method, bytes): + method = self.method.decode('ascii') + return method + def get_user_agent(self): return self.requestHeaders.getRawHeaders(b"User-Agent", [None])[-1] @@ -119,7 +134,7 @@ class SynapseRequest(Request): # dispatching to the handler, so that the handler # can update the servlet name in the request # metrics - requests_counter.labels(self.method.decode('ascii'), + requests_counter.labels(self.get_method(), self.request_metrics.name).inc() @contextlib.contextmanager @@ -207,14 +222,14 @@ class SynapseRequest(Request): self.start_time = time.time() self.request_metrics = RequestMetrics() self.request_metrics.start( - self.start_time, name=servlet_name, method=self.method.decode('ascii'), + self.start_time, name=servlet_name, method=self.get_method(), ) self.site.access_logger.info( "%s - %s - Received request: %s %s", self.getClientIP(), self.site.site_tag, - self.method.decode('ascii'), + self.get_method(), self.get_redacted_uri() ) @@ -280,7 +295,7 @@ class SynapseRequest(Request): int(usage.db_txn_count), self.sentLength, code, - self.method.decode('ascii'), + self.get_method(), self.get_redacted_uri(), self.clientproto.decode('ascii', errors='replace'), user_agent, -- cgit 1.5.1 From 7232917f12460efa2e3579331b6a97f046afe90b Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 2 Oct 2018 22:53:47 +1000 Subject: Disable frozen dicts by default (#3987) --- changelog.d/3987.misc | 1 + synapse/events/__init__.py | 13 ++++++++++--- tests/replication/slave/storage/test_events.py | 6 +++++- tests/utils.py | 3 ++- 4 files changed, 18 insertions(+), 5 deletions(-) create mode 100644 changelog.d/3987.misc (limited to 'synapse') diff --git a/changelog.d/3987.misc b/changelog.d/3987.misc new file mode 100644 index 0000000000..d6b5016211 --- /dev/null +++ b/changelog.d/3987.misc @@ -0,0 +1 @@ +Disable USE_FROZEN_DICTS for unittests by default. diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index b782af6308..12f1eb0a3e 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -13,15 +13,22 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os +from distutils.util import strtobool + import six from synapse.util.caches import intern_dict from synapse.util.frozenutils import freeze # Whether we should use frozen_dict in FrozenEvent. Using frozen_dicts prevents -# bugs where we accidentally share e.g. signature dicts. However, converting -# a dict to frozen_dicts is expensive. -USE_FROZEN_DICTS = True +# bugs where we accidentally share e.g. signature dicts. However, converting a +# dict to frozen_dicts is expensive. +# +# NOTE: This is overridden by the configuration by the Synapse worker apps, but +# for the sake of tests, it is set here while it cannot be configured on the +# homeserver object itself. +USE_FROZEN_DICTS = strtobool(os.environ.get("SYNAPSE_USE_FROZEN_DICTS", "0")) class _EventInternalMetadata(object): diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index db44d33c68..41be5d5a1a 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from canonicaljson import encode_canonical_json + from synapse.events import FrozenEvent, _EventInternalMetadata from synapse.events.snapshot import EventContext from synapse.replication.slave.storage.events import SlavedEventStore @@ -26,7 +28,9 @@ ROOM_ID = "!room:blue" def dict_equals(self, other): - return self.__dict__ == other.__dict__ + me = encode_canonical_json(self._event_dict) + them = encode_canonical_json(other._event_dict) + return me == them def patch__eq__(cls): diff --git a/tests/utils.py b/tests/utils.py index 1ef80e7b79..dd347a0c59 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -136,6 +136,8 @@ def default_config(name): config.rc_messages_per_second = 10000 config.rc_message_burst_count = 10000 + config.use_frozen_dicts = False + # we need a sane default_room_version, otherwise attempts to create rooms will # fail. config.default_room_version = "1" @@ -182,7 +184,6 @@ def setup_test_homeserver( if config is None: config = default_config(name) - config.use_frozen_dicts = True config.ldap_enabled = False if "clock" not in kargs: -- cgit 1.5.1 From 7258d081a5523040dddf5a99ff745acf4fe238bf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Oct 2018 15:47:57 +0100 Subject: Fix bug when invalidating destination retry timings --- synapse/storage/transactions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index ab54977a75..a3032cdce9 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -216,7 +216,7 @@ class TransactionStore(SQLBaseStore): retry_interval (int) - how long until next retry in ms """ - self._destination_retry_cache.pop(destination) + self._destination_retry_cache.pop(destination, None) return self.runInteraction( "set_destination_retry_timings", self._set_destination_retry_timings, -- cgit 1.5.1 From da8f82008d73407896c3dd902740af0d85d38d0a Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 3 Oct 2018 02:15:48 +1000 Subject: version --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/__init__.py b/synapse/__init__.py index b1f7a89fba..312e4e2fa6 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.33.5.1" +__version__ = "0.33.6rc1" -- cgit 1.5.1 From ae61ade8919085ad0c90e0b54c4e8338998ee64a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 2 Oct 2018 23:33:29 +0100 Subject: Fix bug in forward_extremity update logic An event does not stop being a forward_extremity just because an outlier or rejected event refers to it. 
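
The extremity calculation this commit introduces reduces to set algebra over event ids: start from the old extremities (A), add the new non-outlier, non-rejected events (B), then subtract the prev_events of the new events (C) and anything already referenced as a prev_event by existing events (D). A minimal standalone sketch of that A+B-C-D computation, with events and contexts modelled as plain dicts rather than the real FrozenEvent/EventContext types:

# Standalone sketch of the A+B-C-D extremity calculation described above.
# Events are simplified stand-ins, not the actual EventsStore objects.
def calculate_new_extremities(latest_event_ids, event_contexts, existing_prevs):
    new_events = [
        ev for ev, ctx in event_contexts
        if not ev["outlier"] and not ctx["rejected"]
    ]
    result = set(latest_event_ids)                       # A: old extremities
    result.update(ev["event_id"] for ev in new_events)   # + B: new events
    result.difference_update(                            # - C: prevs of new events
        e_id for ev in new_events for e_id in ev["prev_events"]
    )
    result.difference_update(existing_prevs)             # - D: prevs of existing events
    return result

ev_b = {"event_id": "$B", "prev_events": ["$A"], "outlier": False}
print(calculate_new_extremities({"$A"}, [(ev_b, {"rejected": False})], set()))
# -> {'$B'}
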
--- changelog.d/3995.bug | 1 + synapse/storage/events.py | 111 ++++++++++++++++++++++++++++++++-------------- 2 files changed, 79 insertions(+), 33 deletions(-) create mode 100644 changelog.d/3995.bug (limited to 'synapse') diff --git a/changelog.d/3995.bug b/changelog.d/3995.bug new file mode 100644 index 0000000000..2adc36756b --- /dev/null +++ b/changelog.d/3995.bug @@ -0,0 +1 @@ +Fix bug in event persistence logic which caused 'NoneType is not iterable' \ No newline at end of file diff --git a/synapse/storage/events.py b/synapse/storage/events.py index e7487311ce..046174edb3 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -38,6 +38,7 @@ from synapse.storage.background_updates import BackgroundUpdateStore from synapse.storage.event_federation import EventFederationStore from synapse.storage.events_worker import EventsWorkerStore from synapse.types import RoomStreamToken, get_domain_from_id +from synapse.util import batch_iter from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.descriptors import cached, cachedInlineCallbacks from synapse.util.frozenutils import frozendict_json_encoder @@ -386,12 +387,10 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore ) for room_id, ev_ctx_rm in iteritems(events_by_room): - # Work out new extremities by recursively adding and removing - # the new events. latest_event_ids = yield self.get_latest_event_ids_in_room( room_id ) - new_latest_event_ids = yield self._calculate_new_extremeties( + new_latest_event_ids = yield self._calculate_new_extremities( room_id, ev_ctx_rm, latest_event_ids ) @@ -400,6 +399,12 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore # No change in extremities, so no change in state continue + # there should always be at least one forward extremity. + # (except during the initial persistence of the send_join + # results, in which case there will be no existing + # extremities, so we'll `continue` above and skip this bit.) + assert new_latest_event_ids, "No forward extremities left!" + new_forward_extremeties[room_id] = new_latest_event_ids len_1 = ( @@ -517,44 +522,88 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore ) @defer.inlineCallbacks - def _calculate_new_extremeties(self, room_id, event_contexts, latest_event_ids): - """Calculates the new forward extremeties for a room given events to + def _calculate_new_extremities(self, room_id, event_contexts, latest_event_ids): + """Calculates the new forward extremities for a room given events to persist. Assumes that we are only persisting events for one room at a time. """ - new_latest_event_ids = set(latest_event_ids) - # First, add all the new events to the list - new_latest_event_ids.update( - event.event_id for event, ctx in event_contexts + + # we're only interested in new events which aren't outliers and which aren't + # being rejected. 
+ new_events = [ + event for event, ctx in event_contexts if not event.internal_metadata.is_outlier() and not ctx.rejected + ] + + # start with the existing forward extremities + result = set(latest_event_ids) + + # add all the new events to the list + result.update( + event.event_id for event in new_events ) - # Now remove all events that are referenced by the to-be-added events - new_latest_event_ids.difference_update( + + # Now remove all events which are prev_events of any of the new events + result.difference_update( e_id - for event, ctx in event_contexts + for event in new_events for e_id, _ in event.prev_events - if not event.internal_metadata.is_outlier() and not ctx.rejected ) - # And finally remove any events that are referenced by previously added - # events. - rows = yield self._simple_select_many_batch( - table="event_edges", - column="prev_event_id", - iterable=list(new_latest_event_ids), - retcols=["prev_event_id"], - keyvalues={ - "is_state": False, - }, - desc="_calculate_new_extremeties", - ) + # Finally, remove any events which are prev_events of any existing events. + existing_prevs = yield self._get_events_which_are_prevs(result) + result.difference_update(existing_prevs) - new_latest_event_ids.difference_update( - row["prev_event_id"] for row in rows - ) + if not result: + logger.warn( + "Forward extremity list A+B-C-D is now empty in %s. " + "Old extremities (A): %s, new events (B): %s, " + "existing events which are reffed by new events (C): %s, " + "new events which are reffed by existing events (D): %s", + room_id, latest_event_ids, new_events, + [e_id for event in new_events for e_id, _ in event.prev_events], + existing_prevs, + ) + defer.returnValue(result) - defer.returnValue(new_latest_event_ids) + @defer.inlineCallbacks + def _get_events_which_are_prevs(self, event_ids): + """Filter the supplied list of event_ids to get those which are prev_events of + existing (non-outlier) events. + + Args: + event_ids (Iterable[str]): event ids to filter + + Returns: + Deferred[List[str]]: filtered event ids + """ + results = [] + + def _get_events(txn, batch): + sql = """ + SELECT prev_event_id + FROM event_edges + INNER JOIN events USING (event_id) + LEFT JOIN rejections USING (event_id) + WHERE + prev_event_id IN (%s) + AND rejections.event_id IS NULL + """ % ( + ",".join("?" for _ in batch), + ) + + txn.execute(sql, batch) + results.extend(r[0] for r in txn) + + for chunk in batch_iter(event_ids, 100): + yield self.runInteraction( + "_get_events_which_are_prevs", + _get_events, + chunk, + ) + + defer.returnValue(results) @defer.inlineCallbacks def _get_new_state_after_events(self, room_id, events_context, old_latest_event_ids, @@ -586,10 +635,6 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore the new current state is only returned if we've already calculated it. 
""" - - if not new_latest_event_ids: - return - # map from state_group to ((type, key) -> event_id) state map state_groups_map = {} -- cgit 1.5.1 From 3e39783d5d7ca5da7b3619d0328f6aeec48854de Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 2 Oct 2018 23:44:14 +0100 Subject: remove debugging --- synapse/storage/events.py | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 046174edb3..8822dc7bcb 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -555,16 +555,6 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore existing_prevs = yield self._get_events_which_are_prevs(result) result.difference_update(existing_prevs) - if not result: - logger.warn( - "Forward extremity list A+B-C-D is now empty in %s. " - "Old extremities (A): %s, new events (B): %s, " - "existing events which are reffed by new events (C): %s, " - "new events which are reffed by existing events (D): %s", - room_id, latest_event_ids, new_events, - [e_id for event in new_events for e_id, _ in event.prev_events], - existing_prevs, - ) defer.returnValue(result) @defer.inlineCallbacks -- cgit 1.5.1 From 9693625e556df1af66ba376d49411064c2d0f47e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 3 Oct 2018 10:19:41 +0100 Subject: actually exclude outliers --- synapse/storage/events.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 8822dc7bcb..03cedf3a75 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -560,7 +560,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore @defer.inlineCallbacks def _get_events_which_are_prevs(self, event_ids): """Filter the supplied list of event_ids to get those which are prev_events of - existing (non-outlier) events. + existing (non-outlier/rejected) events. Args: event_ids (Iterable[str]): event ids to filter @@ -578,6 +578,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore LEFT JOIN rejections USING (event_id) WHERE prev_event_id IN (%s) + AND NOT events.outlier AND rejections.event_id IS NULL """ % ( ",".join("?" for _ in batch), -- cgit 1.5.1 From 7c570bff749eaf72be6f982c89facab3b16c42ae Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Oct 2018 11:28:01 +0100 Subject: Fix exception in background metrics collection We attempted to iterate through a list on a separate thread without doing the necessary copying. --- synapse/metrics/background_process_metrics.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 173908299c..037f1c490e 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -101,9 +101,13 @@ class _Collector(object): labels=["name"], ) - # We copy the dict so that it doesn't change from underneath us + # We copy the dict so that it doesn't change from underneath us. 
+ # We also copy the process lists as that can also change with _bg_metrics_lock: - _background_processes_copy = dict(_background_processes) + _background_processes_copy = { + k: list(v) + for k, v in six.iteritems(_background_processes) + } for desc, processes in six.iteritems(_background_processes_copy): background_process_in_flight_count.add_metric( -- cgit 1.5.1 From 495a9d06bb21cf30376292d6592aa5fd59a52634 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Oct 2018 11:34:30 +0100 Subject: Fix exception handling in fetching remote profiles --- synapse/handlers/profile.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index f284d5a385..1dfbde84fd 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -142,10 +142,8 @@ class BaseProfileHandler(BaseHandler): if e.code != 404: logger.exception("Failed to get displayname") raise - except Exception: - logger.exception("Failed to get displayname") - else: - defer.returnValue(result["displayname"]) + + defer.returnValue(result["displayname"]) @defer.inlineCallbacks def set_displayname(self, target_user, requester, new_displayname, by_admin=False): @@ -199,8 +197,6 @@ class BaseProfileHandler(BaseHandler): if e.code != 404: logger.exception("Failed to get avatar_url") raise - except Exception: - logger.exception("Failed to get avatar_url") defer.returnValue(result["avatar_url"]) -- cgit 1.5.1 From 69e857853fe91d22fa792c9e26edd87840526c22 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Oct 2018 11:57:30 +0100 Subject: Fix handling of rejected threepid invites --- synapse/event_auth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 02fa46ef7f..c6c011b6b6 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -157,7 +157,7 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True): raise AuthError( 403, ( "You cannot issue a third party invite for %s." % - (event.content.display_name,) + (event.content.get("display_name", ""),) ) ) else: -- cgit 1.5.1 From 52e6e815bef4379773fde257c85c4fc4d11cd181 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Oct 2018 14:13:07 +0100 Subject: Sanitise error messages when user doesn't have permission to invite --- synapse/event_auth.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'synapse') diff --git a/synapse/event_auth.py b/synapse/event_auth.py index c6c011b6b6..af3eee95b9 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -155,10 +155,7 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True): if user_level < invite_level: raise AuthError( - 403, ( - "You cannot issue a third party invite for %s." % - (event.content.get("display_name", ""),) - ) + 403, "You don't have permission to invite users", ) else: logger.debug("Allowing! %s", event) @@ -305,7 +302,7 @@ def _is_membership_change_allowed(event, auth_events): if user_level < invite_level: raise AuthError( - 403, "You cannot invite user %s." 
% target_user_id + 403, "You don't have permission to invite users", ) elif Membership.JOIN == membership: # Joins are valid iff caller == target and they were: -- cgit 1.5.1 From a59d8996682b33454e1afcf2ab1bc23c5224e325 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 3 Oct 2018 17:15:35 +0100 Subject: Pin to prometheus_client<0.4 to avoid renaming all of our metrics --- changelog.d/4002.misc | 1 + synapse/python_dependencies.py | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelog.d/4002.misc (limited to 'synapse') diff --git a/changelog.d/4002.misc b/changelog.d/4002.misc new file mode 100644 index 0000000000..545abe5d58 --- /dev/null +++ b/changelog.d/4002.misc @@ -0,0 +1 @@ +Pin to prometheus_client<0.4 to avoid renaming all of our metrics \ No newline at end of file diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 0f339a0320..d4d983b00a 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -58,7 +58,10 @@ REQUIREMENTS = { "msgpack-python>=0.3.0": ["msgpack"], "phonenumbers>=8.2.0": ["phonenumbers"], "six>=1.10": ["six"], - "prometheus_client>=0.0.18": ["prometheus_client"], + + # prometheus_client 0.4.0 changed the format of counter metrics + # (cf https://github.com/matrix-org/synapse/issues/4001) + "prometheus_client>=0.0.18,<0.4.0": ["prometheus_client"], # we use attr.s(slots), which arrived in 16.0.0 "attrs>=16.0.0": ["attr>=16.0.0"], -- cgit 1.5.1 From dd59dfc51ffdcb49e3ae46b9072cff86dfc32d94 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 4 Oct 2018 22:37:55 +1000 Subject: full version --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/__init__.py b/synapse/__init__.py index 312e4e2fa6..43c5821ade 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.33.6rc1" +__version__ = "0.33.6" -- cgit 1.5.1 From 2dadc092b8f0e4cc1ac037ea54324efd906d4caf Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 4 Oct 2018 17:00:27 +0100 Subject: move logic into register, fix room alias localpart bug, tests --- synapse/handlers/register.py | 45 ++++++++++++------------------ tests/handlers/test_register.py | 62 ++++++++++++++++++++++++++++------------- tests/utils.py | 1 + 3 files changed, 62 insertions(+), 46 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 7584064d5d..01cf7ab58e 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -220,8 +220,26 @@ class RegistrationHandler(BaseHandler): # auto-join the user to any rooms we're supposed to dump them into fake_requester = create_requester(user_id) + + # try to create the room if we're the first user on the server + if self.hs.config.autocreate_auto_join_rooms: + count = yield self.store.count_all_users() + auto_create_rooms = count == 1 + for r in self.hs.config.auto_join_rooms: try: + if auto_create_rooms and RoomAlias.is_valid(r): + room_creation_handler = self.hs.get_room_creation_handler() + # create room expects the localpart of the room alias + room_alias_localpart = RoomAlias.from_string(r).localpart + yield room_creation_handler.create_room( + fake_requester, + config={ + "preset": "public_chat", + "room_alias_name": room_alias_localpart + }, + ratelimit=False, + ) yield self._join_user_to_room(fake_requester, r) except Exception as e: logger.error("Failed to join new user to %r: %r", r, e) 
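
The hunk above passes only the localpart of each configured alias to create_room, which is why it goes through RoomAlias.from_string(r).localpart first. A minimal stand-in for that step (not the real synapse.types parser), using the "#room:test" alias from the tests below:

# Sketch of the alias -> localpart step: "#room:test" must become "room"
# before being passed as room_alias_name to create_room.
def alias_localpart(alias):
    if not alias.startswith("#") or ":" not in alias:
        raise ValueError("not a valid room alias: %r" % (alias,))
    return alias[1:].split(":", 1)[0]

assert alias_localpart("#room:test") == "room"
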
@@ -514,33 +532,6 @@ class RegistrationHandler(BaseHandler): @defer.inlineCallbacks def _join_user_to_room(self, requester, room_identifier): - # try to create the room if we're the first user on the server - if self.hs.config.autocreate_auto_join_rooms: - count = yield self.store.count_all_users() - if count == 1 and RoomAlias.is_valid(room_identifier): - room_creation_handler = self.hs.get_room_creation_handler() - info = yield room_creation_handler.create_room( - requester, - config={ - "preset": "public_chat", - }, - ratelimit=False, - ) - room_id = info["room_id"] - - directory_handler = self.hs.get_handlers().directory_handler - room_alias = RoomAlias.from_string(room_identifier) - yield directory_handler.create_association( - user_id=requester.user.to_string(), - room_alias=room_alias, - room_id=room_id, - servers=[self.hs.hostname], - ) - - yield directory_handler.send_room_alias_update_event( - requester, requester.user.to_string(), room_id - ) - room_id = None room_member_handler = self.hs.get_room_member_handler() if RoomID.is_valid(room_identifier): diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 7b4ade3dfb..a150a897ac 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -19,7 +19,7 @@ from twisted.internet import defer from synapse.api.errors import ResourceLimitError from synapse.handlers.register import RegistrationHandler -from synapse.types import UserID, create_requester +from synapse.types import RoomAlias, UserID, create_requester from tests.utils import setup_test_homeserver @@ -41,30 +41,28 @@ class RegistrationTestCase(unittest.TestCase): self.mock_captcha_client = Mock() self.hs = yield setup_test_homeserver( self.addCleanup, - handlers=None, - http_client=None, expire_access_token=True, - profile_handler=Mock(), ) self.macaroon_generator = Mock( generate_access_token=Mock(return_value='secret') ) self.hs.get_macaroon_generator = Mock(return_value=self.macaroon_generator) - self.hs.handlers = RegistrationHandlers(self.hs) + # self.hs.handlers = RegistrationHandlers(self.hs) self.handler = self.hs.get_handlers().registration_handler self.store = self.hs.get_datastore() self.hs.config.max_mau_value = 50 self.lots_of_users = 100 self.small_number_of_users = 1 + self.requester = create_requester("@requester:test") + @defer.inlineCallbacks def test_user_is_created_and_logged_in_if_doesnt_exist(self): - local_part = "someone" - display_name = "someone" - user_id = "@someone:test" - requester = create_requester("@as:test") + frank = UserID.from_string("@frank:test") + user_id = frank.to_string() + requester = create_requester(user_id) result_user_id, result_token = yield self.handler.get_or_create_user( - requester, local_part, display_name + requester, frank.localpart, "Frankie" ) self.assertEquals(result_user_id, user_id) self.assertEquals(result_token, 'secret') @@ -78,12 +76,11 @@ class RegistrationTestCase(unittest.TestCase): token="jkv;g498752-43gj['eamb!-5", password_hash=None, ) - local_part = "frank" - display_name = "Frank" - user_id = "@frank:test" - requester = create_requester("@as:test") + local_part = frank.localpart + user_id = frank.to_string() + requester = create_requester(user_id) result_user_id, result_token = yield self.handler.get_or_create_user( - requester, local_part, display_name + requester, local_part, None ) self.assertEquals(result_user_id, user_id) self.assertEquals(result_token, 'secret') @@ -92,7 +89,7 @@ class RegistrationTestCase(unittest.TestCase): def 
test_mau_limits_when_disabled(self): self.hs.config.limit_usage_by_mau = False # Ensure does not throw exception - yield self.handler.get_or_create_user("requester", 'a', "display_name") + yield self.handler.get_or_create_user(self.requester, 'a', "display_name") @defer.inlineCallbacks def test_get_or_create_user_mau_not_blocked(self): @@ -101,7 +98,7 @@ class RegistrationTestCase(unittest.TestCase): return_value=defer.succeed(self.hs.config.max_mau_value - 1) ) # Ensure does not throw exception - yield self.handler.get_or_create_user("@user:server", 'c', "User") + yield self.handler.get_or_create_user(self.requester, 'c', "User") @defer.inlineCallbacks def test_get_or_create_user_mau_blocked(self): @@ -110,13 +107,13 @@ class RegistrationTestCase(unittest.TestCase): return_value=defer.succeed(self.lots_of_users) ) with self.assertRaises(ResourceLimitError): - yield self.handler.get_or_create_user("requester", 'b', "display_name") + yield self.handler.get_or_create_user(self.requester, 'b', "display_name") self.store.get_monthly_active_count = Mock( return_value=defer.succeed(self.hs.config.max_mau_value) ) with self.assertRaises(ResourceLimitError): - yield self.handler.get_or_create_user("requester", 'b', "display_name") + yield self.handler.get_or_create_user(self.requester, 'b', "display_name") @defer.inlineCallbacks def test_register_mau_blocked(self): @@ -147,3 +144,30 @@ class RegistrationTestCase(unittest.TestCase): ) with self.assertRaises(ResourceLimitError): yield self.handler.register_saml2(localpart="local_part") + + @defer.inlineCallbacks + def test_auto_create_auto_join_rooms(self): + room_alias_str = "#room:test" + self.hs.config.autocreate_auto_join_rooms = True + self.hs.config.auto_join_rooms = [room_alias_str] + + res = yield self.handler.register(localpart='jeff') + rooms = yield self.store.get_rooms_for_user(res[0]) + + directory_handler = self.hs.get_handlers().directory_handler + room_alias = RoomAlias.from_string(room_alias_str) + room_id = yield directory_handler.get_association(room_alias) + + self.assertTrue(room_id['room_id'] in rooms) + self.assertEqual(len(rooms), 1) + + @defer.inlineCallbacks + def test_auto_create_auto_join_rooms_with_no_rooms(self): + self.hs.config.autocreate_auto_join_rooms = True + self.hs.config.auto_join_rooms = [] + frank = UserID.from_string("@frank:test") + res = yield self.handler.register(frank.localpart) + self.assertEqual(res[0], frank.to_string()) + rooms = yield self.store.get_rooms_for_user(res[0]) + + self.assertEqual(len(rooms), 0) diff --git a/tests/utils.py b/tests/utils.py index aaed1149c3..d34c224fb1 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -149,6 +149,7 @@ def setup_test_homeserver( config.block_events_without_consent_error = None config.media_storage_providers = [] config.auto_join_rooms = [] + config.autocreate_auto_join_rooms = True config.limit_usage_by_mau = False config.hs_disabled = False config.hs_disabled_message = "" -- cgit 1.5.1 From f7199e873448794ffc88a08a9d7c01f1be0dfca1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 5 Oct 2018 11:23:08 +0100 Subject: Log looping call exceptions If a looping call function errors, then it kills the loop entirely. Currently it throws away the exception logs, so we should make it actually log them. 
Fixes #3929 --- synapse/util/__init__.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 680ea928c7..c237d003bc 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import functools import logging from itertools import islice @@ -66,7 +67,7 @@ class Clock(object): f(function): The function to call repeatedly. msec(float): How long to wait between calls in milliseconds. """ - call = task.LoopingCall(f) + call = task.LoopingCall(_log_exception_wrapper(f)) call.clock = self._reactor call.start(msec / 1000.0, now=False) return call @@ -109,3 +110,19 @@ def batch_iter(iterable, size): sourceiter = iter(iterable) # call islice until it returns an empty tuple return iter(lambda: tuple(islice(sourceiter, size)), ()) + + +def _log_exception_wrapper(f): + """Used to wrap looping calls to log loudly if they get killed + """ + + @functools.wraps(f) + def wrap(*args, **kwargs): + try: + logger.info("Running looping call") + return f(*args, **kwargs) + except: # noqa: E722, as we reraise the exception this is fine. + logger.exception("Looping called died") + raise + + return wrap -- cgit 1.5.1 From 497444f1fdd2c39906179b1dde8c67415e465398 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 5 Oct 2018 15:08:36 +0100 Subject: Don't reuse backup versions Since we don't actually delete the keys, just mark the versions as deleted in the db rather than actually deleting them, then we won't reuse versions. Fixes https://github.com/vector-im/riot-web/issues/7448 --- synapse/storage/e2e_room_keys.py | 9 +++++++-- synapse/storage/schema/delta/51/e2e_room_keys.sql | 3 ++- 2 files changed, 9 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 969f4aef9c..4d439bb164 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -193,7 +193,8 @@ class EndToEndRoomKeyStore(SQLBaseStore): @staticmethod def _get_current_version(txn, user_id): txn.execute( - "SELECT MAX(version) FROM e2e_room_keys_versions WHERE user_id=?", + "SELECT MAX(version) FROM e2e_room_keys_versions " + "WHERE user_id=? 
AND deleted=0", (user_id,) ) row = txn.fetchone() @@ -226,6 +227,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): keyvalues={ "user_id": user_id, "version": this_version, + "deleted": 0, }, retcols=( "version", @@ -300,13 +302,16 @@ class EndToEndRoomKeyStore(SQLBaseStore): else: this_version = version - return self._simple_delete_one_txn( + return self._simple_update_one_txn( txn, table="e2e_room_keys_versions", keyvalues={ "user_id": user_id, "version": this_version, }, + updatevalues={ + "deleted": 1, + } ) return self.runInteraction( diff --git a/synapse/storage/schema/delta/51/e2e_room_keys.sql b/synapse/storage/schema/delta/51/e2e_room_keys.sql index 4531fd56ee..c0e66a697d 100644 --- a/synapse/storage/schema/delta/51/e2e_room_keys.sql +++ b/synapse/storage/schema/delta/51/e2e_room_keys.sql @@ -32,7 +32,8 @@ CREATE TABLE e2e_room_keys_versions ( user_id TEXT NOT NULL, version TEXT NOT NULL, algorithm TEXT NOT NULL, - auth_data TEXT NOT NULL + auth_data TEXT NOT NULL, + deleted SMALLINT DEFAULT 0 NOT NULL ); CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions(user_id, version); -- cgit 1.5.1 From 8a1817f0d29308a233783d43cbf1ad27891120c1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 8 Oct 2018 13:26:54 +0100 Subject: Use errback pattern and catch async failures --- synapse/handlers/appservice.py | 7 ++++++- synapse/util/__init__.py | 43 ++++++++++++++++++++++++++++-------------- 2 files changed, 35 insertions(+), 15 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index f0f89af7dc..16b897eb18 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -28,6 +28,7 @@ from synapse.metrics import ( event_processing_loop_room_count, ) from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.util import make_log_failure_errback from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.util.metrics import Measure @@ -112,7 +113,11 @@ class ApplicationServicesHandler(object): if not self.started_scheduler: def start_scheduler(): - return self.scheduler.start().addErrback(log_failure) + return self.scheduler.start().addErrback( + make_log_failure_errback( + "Application Services Failure", + ) + ) run_as_background_process("as_scheduler", start_scheduler) self.started_scheduler = True diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index c237d003bc..964078aed4 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import functools import logging from itertools import islice @@ -67,9 +66,12 @@ class Clock(object): f(function): The function to call repeatedly. msec(float): How long to wait between calls in milliseconds. 
""" - call = task.LoopingCall(_log_exception_wrapper(f)) + call = task.LoopingCall(f) call.clock = self._reactor - call.start(msec / 1000.0, now=False) + d = call.start(msec / 1000.0, now=False) + d.addErrback(make_log_failure_errback( + "Looping call died", consumeErrors=False, + )) return call def call_later(self, delay, callback, *args, **kwargs): @@ -112,17 +114,30 @@ def batch_iter(iterable, size): return iter(lambda: tuple(islice(sourceiter, size)), ()) -def _log_exception_wrapper(f): - """Used to wrap looping calls to log loudly if they get killed +def make_log_failure_errback(msg, consumeErrors=True): + """Creates a function suitable for passing to `Deferred.addErrback` that + logs any failures that occur. + + Args: + msg (str): Message to log + consumeErrors (bool): If true consumes the failure, otherwise passes + on down the callback chain + + Returns: + func(Failure) """ - @functools.wraps(f) - def wrap(*args, **kwargs): - try: - logger.info("Running looping call") - return f(*args, **kwargs) - except: # noqa: E722, as we reraise the exception this is fine. - logger.exception("Looping called died") - raise + def log_failure(failure): + logger.error( + msg, + exc_info=( + failure.type, + failure.value, + failure.getTracebackObject() + ) + ) + + if not consumeErrors: + return failure - return wrap + return log_failure -- cgit 1.5.1 From 495975e231bd429eb0114d9258423a5e202dcb2b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 8 Oct 2018 13:44:58 +0100 Subject: Optimisation for filter_events_for_server We're better off hashing just the event_id than the whole ((type, state_key), event_id) tuple - so use a dict instead of a set. Also, iteritems > items. --- changelog.d/4017.misc | 1 + synapse/visibility.py | 13 ++++++------- 2 files changed, 7 insertions(+), 7 deletions(-) create mode 100644 changelog.d/4017.misc (limited to 'synapse') diff --git a/changelog.d/4017.misc b/changelog.d/4017.misc new file mode 100644 index 0000000000..b1ceb06560 --- /dev/null +++ b/changelog.d/4017.misc @@ -0,0 +1 @@ +Optimisation for serving federation requests \ No newline at end of file diff --git a/synapse/visibility.py b/synapse/visibility.py index d4680863d3..c64ad2144c 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -324,14 +324,13 @@ def filter_events_for_server(store, server_name, events): # server's domain. # # event_to_state_ids contains lots of duplicates, so it turns out to be - # cheaper to build a complete set of unique - # ((type, state_key), event_id) tuples, and then filter out the ones we - # don't want. 
+ # cheaper to build a complete event_id => (type, state_key) dict, and then + # filter out the ones we don't want # - state_key_to_event_id_set = { - e + event_id_to_state_key = { + event_id: key for key_to_eid in itervalues(event_to_state_ids) - for e in key_to_eid.items() + for key, event_id in iteritems(key_to_eid) } def include(typ, state_key): @@ -346,7 +345,7 @@ def filter_events_for_server(store, server_name, events): event_map = yield store.get_events([ e_id - for key, e_id in state_key_to_event_id_set + for e_id, key in iteritems(event_id_to_state_key) if include(key[0], key[1]) ]) -- cgit 1.5.1 From 69823205722662a72e4203ec304ff595a0f6ecf7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 8 Oct 2018 14:06:19 +0100 Subject: Remove unnecessary extra function call layer --- synapse/handlers/appservice.py | 18 +++--------------- synapse/util/__init__.py | 29 +++++++++++++---------------- 2 files changed, 16 insertions(+), 31 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 16b897eb18..17eedf4dbf 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -28,7 +28,7 @@ from synapse.metrics import ( event_processing_loop_room_count, ) from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.util import make_log_failure_errback +from synapse.util import log_failure from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.util.metrics import Measure @@ -37,17 +37,6 @@ logger = logging.getLogger(__name__) events_processed_counter = Counter("synapse_handlers_appservice_events_processed", "") -def log_failure(failure): - logger.error( - "Application Services Failure", - exc_info=( - failure.type, - failure.value, - failure.getTracebackObject() - ) - ) - - class ApplicationServicesHandler(object): def __init__(self, hs): @@ -114,10 +103,9 @@ class ApplicationServicesHandler(object): if not self.started_scheduler: def start_scheduler(): return self.scheduler.start().addErrback( - make_log_failure_errback( - "Application Services Failure", - ) + log_failure, "Application Services Failure", ) + run_as_background_process("as_scheduler", start_scheduler) self.started_scheduler = True diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 964078aed4..9a8fae0497 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -69,9 +69,9 @@ class Clock(object): call = task.LoopingCall(f) call.clock = self._reactor d = call.start(msec / 1000.0, now=False) - d.addErrback(make_log_failure_errback( - "Looping call died", consumeErrors=False, - )) + d.addErrback( + log_failure, "Looping call died", consumeErrors=False, + ) return call def call_later(self, delay, callback, *args, **kwargs): @@ -114,7 +114,7 @@ def batch_iter(iterable, size): return iter(lambda: tuple(islice(sourceiter, size)), ()) -def make_log_failure_errback(msg, consumeErrors=True): +def log_failure(failure, msg, consumeErrors=True): """Creates a function suitable for passing to `Deferred.addErrback` that logs any failures that occur. 
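
The pattern being adopted here, `d.addErrback(log_failure, "Application Services Failure")`, relies on Deferred forwarding extra positional arguments after the Failure itself, which is what lets one shared logging errback replace the per-call-site wrappers. A small self-contained illustration of that contract, with a stand-in for the real logger-based function:

# Illustration of the errback pattern: addErrback forwards extra args
# after the Failure, and returning the failure keeps the chain failing.
from twisted.internet import defer
from twisted.python import failure

def log_failure(f, msg, consumeErrors=True):
    print("%s: %s" % (msg, f.value))  # stand-in for logger.error(..., exc_info=...)
    if not consumeErrors:
        return f                      # pass the failure on down the errback chain

d = defer.Deferred()
d.addErrback(log_failure, "Looping call died", False)
d.addErrback(log_failure, "second errback still sees the failure")
d.errback(failure.Failure(RuntimeError("boom")))
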
@@ -127,17 +127,14 @@ def make_log_failure_errback(msg, consumeErrors=True): func(Failure) """ - def log_failure(failure): - logger.error( - msg, - exc_info=( - failure.type, - failure.value, - failure.getTracebackObject() - ) + logger.error( + msg, + exc_info=( + failure.type, + failure.value, + failure.getTracebackObject() ) + ) - if not consumeErrors: - return failure - - return log_failure + if not consumeErrors: + return failure -- cgit 1.5.1 From 0c905ee015fdbf0d61f46db600fd79a18d0b6376 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 9 Oct 2018 09:39:13 +0100 Subject: be python3 compatible --- synapse/handlers/e2e_room_keys.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 2c330382cf..117144bb6e 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -15,6 +15,7 @@ import logging +from six import iteritems from twisted.internet import defer from synapse.api.errors import RoomKeysVersionError, StoreError, SynapseError @@ -137,8 +138,8 @@ class E2eRoomKeysHandler(object): # go through the room_keys. # XXX: this should/could be done concurrently, given we're in a lock. - for room_id, room in room_keys['rooms'].iteritems(): - for session_id, session in room['sessions'].iteritems(): + for room_id, room in iteritems(room_keys['rooms']): + for session_id, session in iteritems(room['sessions']): yield self._upload_room_key( user_id, version, room_id, session_id, session ) -- cgit 1.5.1 From f4a4dbcad106b41e3307eb5ca541ff3087b788cf Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 9 Oct 2018 09:47:04 +0100 Subject: Apparently this blank line is Very Important --- synapse/handlers/e2e_room_keys.py | 1 + 1 file changed, 1 insertion(+) (limited to 'synapse') diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 117144bb6e..bf2a83cc31 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -16,6 +16,7 @@ import logging from six import iteritems + from twisted.internet import defer from synapse.api.errors import RoomKeysVersionError, StoreError, SynapseError -- cgit 1.5.1 From d3464ce708cd857216d88d3e67867f1cf0c8bf60 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 9 Oct 2018 10:33:59 +0100 Subject: isort --- synapse/rest/client/v2_alpha/room_keys.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index ea114bc8b4..539893a5d6 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -17,7 +17,7 @@ import logging from twisted.internet import defer -from synapse.api.errors import SynapseError, Codes +from synapse.api.errors import Codes, SynapseError from synapse.http.servlet import ( RestServlet, parse_json_object_from_request, -- cgit 1.5.1 From bdc27d6716d14128e25737865cc36c8adf42aeaa Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Oct 2018 14:15:49 +0100 Subject: Add metric to count lazy member sync requests --- synapse/handlers/sync.py | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 67b8ca28c7..58edf21472 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -20,6 +20,8 @@ import logging from six import iteritems, itervalues +from 
prometheus_client import Counter + from twisted.internet import defer from synapse.api.constants import EventTypes, Membership @@ -36,6 +38,13 @@ from synapse.visibility import filter_events_for_client logger = logging.getLogger(__name__) + +# Counts the number of times we got asked for a lazy loaded sync. Type is one of +# initial_sync, full_sate_sync or incremental_sync +lazy_member_sync_counter = Counter( + "synapse_handlers_sync_lazy_member_sync", "", ["type"], +) + # Store the cache that tracks which lazy-loaded members have been sent to a given # client for no more than 30 minutes. LAZY_LOADED_MEMBERS_CACHE_MAX_AGE = 30 * 60 * 1000 @@ -227,14 +236,19 @@ class SyncHandler(object): @defer.inlineCallbacks def _wait_for_sync_for_user(self, sync_config, since_token, timeout, full_state): + if since_token is None: + sync_type = "initial_sync" + elif full_state: + sync_type = "full_state_sync" + else: + sync_type = "incremental_sync" + context = LoggingContext.current_context() if context: - if since_token is None: - context.tag = "initial_sync" - elif full_state: - context.tag = "full_state_sync" - else: - context.tag = "incremental_sync" + context.tag = sync_type + + if sync_config.filter_collection.lazy_load_members(): + lazy_member_sync_counter.labels(sync_type).inc() if timeout == 0 or since_token is None or full_state: # we are going to return immediately, so don't bother calling -- cgit 1.5.1 From b8d9e108be60b3d14a57238562bfb5f49781c2a6 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 9 Oct 2018 18:04:21 +0100 Subject: Fix mergefail --- synapse/api/errors.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 4124469442..0a6e78711f 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -344,7 +344,7 @@ class IncompatibleRoomVersionError(SynapseError): return cs_error( self.msg, self.errcode, - room_version=self.current_version, + room_version=self._room_version, ) -- cgit 1.5.1 From 395276b4051123d369aab5deab020f816479c63b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Oct 2018 09:24:39 +0100 Subject: Append _total to metric and fix up spelling --- synapse/handlers/sync.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 58edf21472..41daa1d888 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -40,9 +40,9 @@ logger = logging.getLogger(__name__) # Counts the number of times we got asked for a lazy loaded sync. 
Type is one of -# initial_sync, full_sate_sync or incremental_sync +# initial_sync, full_state_sync or incremental_sync lazy_member_sync_counter = Counter( - "synapse_handlers_sync_lazy_member_sync", "", ["type"], + "synapse_handlers_sync_lazy_member_sync_total", "", ["type"], ) # Store the cache that tracks which lazy-loaded members have been sent to a given -- cgit 1.5.1 From 3cbe8331e60559008875679fab51423eedb242c1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Oct 2018 11:23:17 +0100 Subject: Track number of non-empty sync responses instead --- synapse/handlers/sync.py | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 41daa1d888..5cae48436f 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -39,10 +39,12 @@ from synapse.visibility import filter_events_for_client logger = logging.getLogger(__name__) -# Counts the number of times we got asked for a lazy loaded sync. Type is one of -# initial_sync, full_state_sync or incremental_sync -lazy_member_sync_counter = Counter( - "synapse_handlers_sync_lazy_member_sync_total", "", ["type"], +# Counts the number of times we returned a non-empty sync. `type` is one of +# "initial_sync", "full_state_sync" or "incremental_sync", `lazy_loaded` is +# "true" or "false" depending on if the request asked for lazy loaded members or +# not. +non_empty_sync_counter = Counter( + "synapse_handlers_sync_nonempty_total", "", ["type", "lazy_loaded"], ) # Store the cache that tracks which lazy-loaded members have been sent to a given @@ -247,16 +249,12 @@ class SyncHandler(object): if context: context.tag = sync_type - if sync_config.filter_collection.lazy_load_members(): - lazy_member_sync_counter.labels(sync_type).inc() - if timeout == 0 or since_token is None or full_state: # we are going to return immediately, so don't bother calling # notifier.wait_for_events. result = yield self.current_sync_for_user( sync_config, since_token, full_state=full_state, ) - defer.returnValue(result) else: def current_sync_callback(before_token, after_token): return self.current_sync_for_user(sync_config, since_token) @@ -265,7 +263,15 @@ class SyncHandler(object): sync_config.user.to_string(), timeout, current_sync_callback, from_token=since_token, ) - defer.returnValue(result) + + if result: + if sync_config.filter_collection.lazy_load_members(): + lazy_loaded = "true" + else: + lazy_loaded = "false" + non_empty_sync_counter.labels(sync_type, lazy_loaded).inc() + + defer.returnValue(result) def current_sync_for_user(self, sync_config, since_token=None, full_state=False): -- cgit 1.5.1 From 7e561b5c1a0cf0177f14e67851bb7e0ffaeda042 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Oct 2018 11:40:43 +0100 Subject: Add description to counter metric --- synapse/handlers/sync.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 5cae48436f..351892a94f 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -44,7 +44,11 @@ logger = logging.getLogger(__name__) # "true" or "false" depending on if the request asked for lazy loaded members or # not. non_empty_sync_counter = Counter( - "synapse_handlers_sync_nonempty_total", "", ["type", "lazy_loaded"], + "synapse_handlers_sync_nonempty_total", + "Count of non empty sync responses. type is initial_sync/full_state_sync" + "/incremental_sync. 
lazy_loaded indicates if lazy loaded members were " + "enabled for that request.", + ["type", "lazy_loaded"], ) # Store the cache that tracks which lazy-loaded members have been sent to a given -- cgit 1.5.1 From 8ddd0f273c501b83b75a58e379d73aef3cfab35e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 12 Oct 2018 09:55:41 +0100 Subject: Comments on get_all_new_events_stream just some docstrings to clarify the behaviour here --- synapse/storage/stream.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 4c296d72c0..d6cfdba519 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -630,7 +630,21 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): @defer.inlineCallbacks def get_all_new_events_stream(self, from_id, current_id, limit): - """Get all new events""" + """Get all new events + + Returns all events with from_id < stream_ordering <= current_id. + + Args: + from_id (int): the stream_ordering of the last event we processed + current_id (int): the stream_ordering of the most recently processed event + limit (int): the maximum number of events to return + + Returns: + Deferred[Tuple[int, list[FrozenEvent]]]: A tuple of (next_id, events), where + `next_id` is the next value to pass as `from_id` (it will either be the + stream_ordering of the last returned event, or, if fewer than `limit` events + were found, `current_id`. + """ def get_all_new_events_stream_txn(txn): sql = ( -- cgit 1.5.1 From 83e72bb2f0c6ef282190a378941c856afbb33c16 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 12 Oct 2018 11:26:18 +0100 Subject: PR feedback pt. 1 --- synapse/api/errors.py | 8 ------ synapse/handlers/e2e_room_keys.py | 41 ++++++++++++++++--------------- synapse/rest/client/v2_alpha/room_keys.py | 2 +- 3 files changed, 22 insertions(+), 29 deletions(-) (limited to 'synapse') diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 0a6e78711f..48b903374d 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -348,14 +348,6 @@ class IncompatibleRoomVersionError(SynapseError): ) -def cs_exception(exception): - if isinstance(exception, CodeMessageException): - return exception.error_dict() - else: - logger.error("Unknown exception type: %s", type(exception)) - return {} - - def cs_error(msg, code=Codes.UNKNOWN, **kwargs): """ Utility method for constructing an error response for client-server interactions. diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index bf2a83cc31..4e3141dac8 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2017 New Vector Ltd +# Copyright 2017, 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -49,6 +49,11 @@ class E2eRoomKeysHandler(object): room, or a given session. See EndToEndRoomKeyStore.get_e2e_room_keys for full details. + Args: + user_id(str): the user whose keys we're getting + version(str): the version ID of the backup we're getting keys from + room_id(string): room ID to get keys for, for None to get keys for all rooms + session_id(string): session ID to get keys for, for None to get keys for all sessions Returns: A deferred list of dicts giving the session_data and message metadata for these room keys. 
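
The rooms -> sessions -> session nesting these docstrings describe looks roughly like the following; the per-session field names are taken from _should_replace_room_key and the docstrings here, but the exact wire format is an assumption:

# Assumed shape of a room_keys dict as handled above; upload_room_keys
# walks rooms -> sessions -> per-session backup data.
room_keys = {
    "rooms": {
        "!abcd:example.org": {
            "sessions": {
                "session_id_1": {
                    "first_message_index": 1,
                    "forwarded_count": 0,
                    "is_verified": False,
                    "session_data": "b3BhcXVlIGJsb2IK",  # opaque encrypted blob
                },
            },
        },
    },
}

for room_id, room in room_keys["rooms"].items():
    for session_id, session in room["sessions"].items():
        print(room_id, session_id, session["is_verified"])
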
@@ -72,6 +77,11 @@ class E2eRoomKeysHandler(object): room or a given session. See EndToEndRoomKeyStore.delete_e2e_room_keys for full details. + Args: + user_id(str): the user whose backup we're deleting + version(str): the version ID of the backup we're deleting + room_id(string): room ID to delete keys for, for None to delete keys for all rooms + session_id(string): session ID to delete keys for, for None to delete keys for all sessions Returns: A deferred of the deletion transaction """ @@ -118,24 +128,24 @@ class E2eRoomKeysHandler(object): # Check that the version we're trying to upload is the current version try: - version_info = yield self._get_version_info_unlocked(user_id) + version_info = yield self.store.get_e2e_room_keys_version_info(user_id) except StoreError as e: if e.code == 404: raise SynapseError(404, "Version '%s' not found" % (version,)) else: - raise e + raise if version_info['version'] != version: # Check that the version we're trying to upload actually exists try: - version_info = yield self._get_version_info_unlocked(user_id, version) + version_info = yield self.store.get_e2e_room_keys_version_info(user_id, version) # if we get this far, the version must exist raise RoomKeysVersionError(current_version=version_info['version']) except StoreError as e: if e.code == 404: raise SynapseError(404, "Version '%s' not found" % (version,)) else: - raise e + raise # go through the room_keys. # XXX: this should/could be done concurrently, given we're in a lock. @@ -168,9 +178,9 @@ class E2eRoomKeysHandler(object): if e.code == 404: pass else: - raise e + raise - if E2eRoomKeysHandler._should_replace_room_key(current_room_key, room_key): + if self._should_replace_room_key(current_room_key, room_key): yield self.store.set_e2e_room_key( user_id, version, room_id, session_id, room_key ) @@ -195,14 +205,14 @@ class E2eRoomKeysHandler(object): # purely for legibility. if room_key['is_verified'] and not current_room_key['is_verified']: - pass + return True elif ( room_key['first_message_index'] < current_room_key['first_message_index'] ): - pass + return True elif room_key['forwarded_count'] < current_room_key['forwarded_count']: - pass + return True else: return False return True @@ -256,18 +266,9 @@ class E2eRoomKeysHandler(object): """ with (yield self._upload_linearizer.queue(user_id)): - res = yield self._get_version_info_unlocked(user_id, version) + res = yield self.store.get_e2e_room_keys_version_info(user_id, version) defer.returnValue(res) - @defer.inlineCallbacks - def _get_version_info_unlocked(self, user_id, version=None): - """Get the info about a given version of the user's backup - without obtaining the upload_linearizer lock. For params see get_version_info - """ - - results = yield self.store.get_e2e_room_keys_version_info(user_id, version) - defer.returnValue(results) - @defer.inlineCallbacks def delete_version(self, user_id, version=None): """Deletes a given version of the user's e2e_room_keys backup diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 539893a5d6..4807170ea6 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2017 New Vector Ltd +# Copyright 2017, 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
-- cgit 1.5.1 From 86ef9760a786e7de128120c11d623e3303178505 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 12 Oct 2018 11:35:08 +0100 Subject: Split /room_keys/version into 2 servlets --- synapse/rest/client/v2_alpha/room_keys.py | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) (limited to 'synapse') diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 4807170ea6..aee6419179 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -240,9 +240,9 @@ class RoomKeysServlet(RestServlet): defer.returnValue((200, {})) -class RoomKeysVersionServlet(RestServlet): +class RoomKeysNewVersionServlet(RestServlet): PATTERNS = client_v2_patterns( - "/room_keys/version(/(?P[^/]+))?$" + "/room_keys/version$" ) def __init__(self, hs): @@ -250,12 +250,12 @@ class RoomKeysVersionServlet(RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(RoomKeysVersionServlet, self).__init__() + super(RoomKeysNewVersionServlet, self).__init__() self.auth = hs.get_auth() self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() @defer.inlineCallbacks - def on_POST(self, request, version): + def on_POST(self, request): """ Create a new backup version for this user's room_keys with the given info. The version is allocated by the server and returned to the user @@ -285,10 +285,6 @@ class RoomKeysVersionServlet(RestServlet): "version": 12345 } """ - - if version: - raise SynapseError(405, "Cannot POST to a specific version") - requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() info = parse_json_object_from_request(request) @@ -301,6 +297,20 @@ class RoomKeysVersionServlet(RestServlet): # we deliberately don't have a PUT /version, as these things really should # be immutable to avoid people footgunning +class RoomKeysVersionServlet(RestServlet): + PATTERNS = client_v2_patterns( + "/room_keys/version(/(?P[^/]+))?$" + ) + + def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ + super(RoomKeysVersionServlet, self).__init__() + self.auth = hs.get_auth() + self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() + @defer.inlineCallbacks def on_GET(self, request, version): """ @@ -320,7 +330,6 @@ class RoomKeysVersionServlet(RestServlet): "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K" } """ - requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() @@ -346,6 +355,8 @@ class RoomKeysVersionServlet(RestServlet): HTTP/1.1 200 OK {} """ + if version is None: + raise SynapseError(400, "No version specified to delete") requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() @@ -359,3 +370,4 @@ class RoomKeysVersionServlet(RestServlet): def register_servlets(hs, http_server): RoomKeysServlet(hs).register(http_server) RoomKeysVersionServlet(hs).register(http_server) + RoomKeysNewVersionServlet(hs).register(http_server) -- cgit 1.5.1 From bddfad253a2ca8ea0645d10e89055c4ce2ff1c38 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 12 Oct 2018 11:48:02 +0100 Subject: Don't mangle exceptions --- synapse/rest/client/v2_alpha/room_keys.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'synapse') diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index aee6419179..8d006d819d 100644 --- 
a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -339,9 +339,7 @@ class RoomKeysVersionServlet(RestServlet): ) except SynapseError as e: if e.code == 404: - e.errcode = Codes.NOT_FOUND - e.msg = "No backup found" - raise e + raise SynapseError(404, "No backup found", Codes.NOT_FOUND) defer.returnValue((200, info)) @defer.inlineCallbacks @@ -356,7 +354,7 @@ class RoomKeysVersionServlet(RestServlet): {} """ if version is None: - raise SynapseError(400, "No version specified to delete") + raise SynapseError(400, "No version specified to delete", Codes.NOT_FOUND) requester = yield self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() -- cgit 1.5.1 From 306361b31bc0fb229de8db29209b9da38374d6f2 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 12 Oct 2018 11:48:56 +0100 Subject: Misc PR feedback bits --- synapse/storage/e2e_room_keys.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 4d439bb164..f25ded2295 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import simplejson as json +import json from twisted.internet import defer @@ -76,7 +76,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): session_id(str): the session whose room_key we're setting room_key(dict): the room_key being set Raises: - StoreError if stuff goes wrong, probably + StoreError """ yield self._simple_upsert( -- cgit 1.5.1 From 8c0ff0287af19cbae40fe7dd413f1e50578908c3 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 12 Oct 2018 13:47:43 +0100 Subject: Linting soothes the savage PEP8 monster --- synapse/handlers/e2e_room_keys.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 4e3141dac8..5edb3cfe04 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -53,7 +53,8 @@ class E2eRoomKeysHandler(object): user_id(str): the user whose keys we're getting version(str): the version ID of the backup we're getting keys from room_id(string): room ID to get keys for, for None to get keys for all rooms - session_id(string): session ID to get keys for, for None to get keys for all sessions + session_id(string): session ID to get keys for, for None to get keys for all + sessions Returns: A deferred list of dicts giving the session_data and message metadata for these room keys. 
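
Taken together, the split servlets give the backup versions a small lifecycle API: POST /room_keys/version allocates a version, while GET and DELETE address a specific one. A rough client-side sketch of that flow; the host, client prefix and access token are placeholders, and the JSON body mirrors the docstring examples above:

# Rough client-side sketch of the backup-version lifecycle exposed by the
# servlets above. Host, prefix and token are placeholders, not real values.
import requests

base = "https://hs.example.org/_matrix/client/unstable/room_keys/version"
headers = {"Authorization": "Bearer ACCESS_TOKEN"}

# POST allocates a new version (RoomKeysNewVersionServlet)
resp = requests.post(base, headers=headers, json={
    "algorithm": "m.megolm_backup.v1",
    "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K",
})
version = resp.json()["version"]

# GET/DELETE address a specific version (RoomKeysVersionServlet)
print(requests.get("%s/%s" % (base, version), headers=headers).json())
requests.delete("%s/%s" % (base, version), headers=headers)
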
@@ -80,8 +81,10 @@ class E2eRoomKeysHandler(object): Args: user_id(str): the user whose backup we're deleting version(str): the version ID of the backup we're deleting - room_id(string): room ID to delete keys for, or None to delete keys for all rooms - session_id(string): session ID to delete keys for, or None to delete keys for all sessions + room_id(string): room ID to delete keys for, or None to delete keys for all + rooms + session_id(string): session ID to delete keys for, or None to delete keys + for all sessions Returns: A deferred of the deletion transaction """ @@ -138,7 +141,9 @@ class E2eRoomKeysHandler(object): if version_info['version'] != version: # Check that the version we're trying to upload actually exists try: - version_info = yield self.store.get_e2e_room_keys_version_info(user_id, version) + version_info = yield self.store.get_e2e_room_keys_version_info( + user_id, version, + ) # if we get this far, the version must exist raise RoomKeysVersionError(current_version=version_info['version']) except StoreError as e: -- cgit 1.5.1 From 381d2cfdf0f02935b743f4b6dc1b5133d7ed27b7 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Sat, 13 Oct 2018 00:14:08 +1100 Subject: Make workers work on Py3 (#4027) --- changelog.d/4027.bugfix | 1 + synapse/app/_base.py | 30 ++++++++++++------------ synapse/app/event_creator.py | 3 +++ synapse/app/pusher.py | 15 ++++++------ synapse/app/synchrotron.py | 12 +++++----- synapse/python_dependencies.py | 3 --- synapse/replication/slave/storage/_base.py | 9 +++++++ synapse/replication/slave/storage/deviceinbox.py | 12 +++++----- synapse/replication/slave/storage/devices.py | 11 +-------- synapse/replication/slave/storage/groups.py | 8 +++---- synapse/replication/slave/storage/keys.py | 14 +++++------ synapse/replication/slave/storage/presence.py | 6 ++--- synctl | 2 +- 13 files changed, 64 insertions(+), 62 deletions(-) create mode 100644 changelog.d/4027.bugfix (limited to 'synapse') diff --git a/changelog.d/4027.bugfix b/changelog.d/4027.bugfix new file mode 100644 index 0000000000..c9bbff3f68 --- /dev/null +++ b/changelog.d/4027.bugfix @@ -0,0 +1 @@ +Workers now start on Python 3.
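Aside: the synapse/app/_base.py hunk below replaces the Python-2-only `affinity` package with `psutil`. A standalone sketch of the same bitmask-to-CPU-list decoding, for illustration only (the function name and mask value here are invented, and psutil is assumed to be installed):

    import psutil

    def set_cpu_affinity_from_mask(cpu_affinity):
        # bin(0x5) == '0b101': strip the '0b' prefix and reverse the string
        # so that string index 0 corresponds to CPU 0.
        mask_to_bits = bin(cpu_affinity)[2:][::-1]
        cpus = [cpu_num for cpu_num, bit in enumerate(mask_to_bits) if bit == "1"]
        # With no argument, psutil.Process() refers to the current process.
        psutil.Process().cpu_affinity(cpus)

    set_cpu_affinity_from_mask(0x5)  # pins the current process to CPUs 0 and 2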
diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 7c866e246a..18584226e9 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -17,6 +17,7 @@ import gc import logging import sys +import psutil from daemonize import Daemonize from twisted.internet import error, reactor @@ -24,12 +25,6 @@ from twisted.internet import error, reactor from synapse.util import PreserveLoggingContext from synapse.util.rlimit import change_resource_limit -try: - import affinity -except Exception: - affinity = None - - logger = logging.getLogger(__name__) @@ -89,15 +84,20 @@ def start_reactor( with PreserveLoggingContext(): logger.info("Running") if cpu_affinity is not None: - if not affinity: - quit_with_error( - "Missing package 'affinity' required for cpu_affinity\n" - "option\n\n" - "Install by running:\n\n" - " pip install affinity\n\n" - ) - logger.info("Setting CPU affinity to %s" % cpu_affinity) - affinity.set_process_affinity_mask(0, cpu_affinity) + # Turn the bitmask into bits, reverse it so we go from 0 up + mask_to_bits = bin(cpu_affinity)[2:][::-1] + + cpus = [] + cpu_num = 0 + + for i in mask_to_bits: + if i == "1": + cpus.append(cpu_num) + cpu_num += 1 + + p = psutil.Process() + p.cpu_affinity(cpus) + change_resource_limit(soft_file_limit) if gc_thresholds: gc.set_threshold(*gc_thresholds) diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index 9060ab14f6..e4a68715aa 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -178,6 +178,9 @@ def start(config_options): setup_logging(config, use_worker_options=True) + # This should only be done on the user directory worker or the master + config.update_user_directory = False + events.USE_FROZEN_DICTS = config.use_frozen_dicts database_engine = create_engine(config.database_config) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 630dcda478..0f9f8e19f6 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -28,6 +28,7 @@ from synapse.config.logger import setup_logging from synapse.http.site import SynapseSite from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource +from synapse.replication.slave.storage._base import __func__ from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.events import SlavedEventStore from synapse.replication.slave.storage.pushers import SlavedPusherStore @@ -49,31 +50,31 @@ class PusherSlaveStore( SlavedAccountDataStore ): update_pusher_last_stream_ordering_and_success = ( - DataStore.update_pusher_last_stream_ordering_and_success.__func__ + __func__(DataStore.update_pusher_last_stream_ordering_and_success) ) update_pusher_failing_since = ( - DataStore.update_pusher_failing_since.__func__ + __func__(DataStore.update_pusher_failing_since) ) update_pusher_last_stream_ordering = ( - DataStore.update_pusher_last_stream_ordering.__func__ + __func__(DataStore.update_pusher_last_stream_ordering) ) get_throttle_params_by_room = ( - DataStore.get_throttle_params_by_room.__func__ + __func__(DataStore.get_throttle_params_by_room) ) set_throttle_params = ( - DataStore.set_throttle_params.__func__ + __func__(DataStore.set_throttle_params) ) get_time_of_last_push_action_before = ( - DataStore.get_time_of_last_push_action_before.__func__ + __func__(DataStore.get_time_of_last_push_action_before) ) get_profile_displayname = ( - DataStore.get_profile_displayname.__func__ + __func__(DataStore.get_profile_displayname) ) diff --git 
a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 9a7fc6ee9d..3926c7f263 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -33,7 +33,7 @@ from synapse.http.server import JsonResource from synapse.http.site import SynapseSite from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource -from synapse.replication.slave.storage._base import BaseSlavedStore +from synapse.replication.slave.storage._base import BaseSlavedStore, __func__ from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore from synapse.replication.slave.storage.client_ips import SlavedClientIpStore @@ -147,7 +147,7 @@ class SynchrotronPresence(object): and haven't come back yet. If there are poke the master about them. """ now = self.clock.time_msec() - for user_id, last_sync_ms in self.users_going_offline.items(): + for user_id, last_sync_ms in list(self.users_going_offline.items()): if now - last_sync_ms > 10 * 1000: self.users_going_offline.pop(user_id, None) self.send_user_sync(user_id, False, last_sync_ms) @@ -156,9 +156,9 @@ class SynchrotronPresence(object): # TODO Hows this supposed to work? pass - get_states = PresenceHandler.get_states.__func__ - get_state = PresenceHandler.get_state.__func__ - current_state_for_users = PresenceHandler.current_state_for_users.__func__ + get_states = __func__(PresenceHandler.get_states) + get_state = __func__(PresenceHandler.get_state) + current_state_for_users = __func__(PresenceHandler.current_state_for_users) def user_syncing(self, user_id, affect_presence): if affect_presence: @@ -208,7 +208,7 @@ class SynchrotronPresence(object): ) for row in rows] for state in states: - self.user_to_current_state[row.user_id] = state + self.user_to_current_state[state.user_id] = state stream_id = token yield self.notify_from_replication(states, stream_id) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index d4d983b00a..2947f37f1a 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -82,9 +82,6 @@ CONDITIONAL_REQUIREMENTS = { "psutil": { "psutil>=2.0.0": ["psutil>=2.0.0"], }, - "affinity": { - "affinity": ["affinity"], - }, "postgres": { "psycopg2>=2.6": ["psycopg2"] } diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py index 3f7be74e02..2d81d49e9a 100644 --- a/synapse/replication/slave/storage/_base.py +++ b/synapse/replication/slave/storage/_base.py @@ -15,6 +15,8 @@ import logging +import six + from synapse.storage._base import SQLBaseStore from synapse.storage.engines import PostgresEngine @@ -23,6 +25,13 @@ from ._slaved_id_tracker import SlavedIdTracker logger = logging.getLogger(__name__) +def __func__(inp): + if six.PY3: + return inp + else: + return inp.__func__ + + class BaseSlavedStore(SQLBaseStore): def __init__(self, db_conn, hs): super(BaseSlavedStore, self).__init__(db_conn, hs) diff --git a/synapse/replication/slave/storage/deviceinbox.py b/synapse/replication/slave/storage/deviceinbox.py index 87eaa53004..4f19fd35aa 100644 --- a/synapse/replication/slave/storage/deviceinbox.py +++ b/synapse/replication/slave/storage/deviceinbox.py @@ -17,7 +17,7 @@ from synapse.storage import DataStore from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.caches.stream_change_cache import StreamChangeCache -from ._base import BaseSlavedStore +from ._base import 
BaseSlavedStore, __func__ from ._slaved_id_tracker import SlavedIdTracker @@ -43,11 +43,11 @@ class SlavedDeviceInboxStore(BaseSlavedStore): expiry_ms=30 * 60 * 1000, ) - get_to_device_stream_token = DataStore.get_to_device_stream_token.__func__ - get_new_messages_for_device = DataStore.get_new_messages_for_device.__func__ - get_new_device_msgs_for_remote = DataStore.get_new_device_msgs_for_remote.__func__ - delete_messages_for_device = DataStore.delete_messages_for_device.__func__ - delete_device_msgs_for_remote = DataStore.delete_device_msgs_for_remote.__func__ + get_to_device_stream_token = __func__(DataStore.get_to_device_stream_token) + get_new_messages_for_device = __func__(DataStore.get_new_messages_for_device) + get_new_device_msgs_for_remote = __func__(DataStore.get_new_device_msgs_for_remote) + delete_messages_for_device = __func__(DataStore.delete_messages_for_device) + delete_device_msgs_for_remote = __func__(DataStore.delete_device_msgs_for_remote) def stream_positions(self): result = super(SlavedDeviceInboxStore, self).stream_positions() diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py index 21b8c468fa..ec2fd561cc 100644 --- a/synapse/replication/slave/storage/devices.py +++ b/synapse/replication/slave/storage/devices.py @@ -13,23 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six - from synapse.storage import DataStore from synapse.storage.end_to_end_keys import EndToEndKeyStore from synapse.util.caches.stream_change_cache import StreamChangeCache -from ._base import BaseSlavedStore +from ._base import BaseSlavedStore, __func__ from ._slaved_id_tracker import SlavedIdTracker -def __func__(inp): - if six.PY3: - return inp - else: - return inp.__func__ - - class SlavedDeviceStore(BaseSlavedStore): def __init__(self, db_conn, hs): super(SlavedDeviceStore, self).__init__(db_conn, hs) diff --git a/synapse/replication/slave/storage/groups.py b/synapse/replication/slave/storage/groups.py index 5777f07c8d..e933b170bb 100644 --- a/synapse/replication/slave/storage/groups.py +++ b/synapse/replication/slave/storage/groups.py @@ -16,7 +16,7 @@ from synapse.storage import DataStore from synapse.util.caches.stream_change_cache import StreamChangeCache -from ._base import BaseSlavedStore +from ._base import BaseSlavedStore, __func__ from ._slaved_id_tracker import SlavedIdTracker @@ -33,9 +33,9 @@ class SlavedGroupServerStore(BaseSlavedStore): "_group_updates_stream_cache", self._group_updates_id_gen.get_current_token(), ) - get_groups_changes_for_user = DataStore.get_groups_changes_for_user.__func__ - get_group_stream_token = DataStore.get_group_stream_token.__func__ - get_all_groups_for_user = DataStore.get_all_groups_for_user.__func__ + get_groups_changes_for_user = __func__(DataStore.get_groups_changes_for_user) + get_group_stream_token = __func__(DataStore.get_group_stream_token) + get_all_groups_for_user = __func__(DataStore.get_all_groups_for_user) def stream_positions(self): result = super(SlavedGroupServerStore, self).stream_positions() diff --git a/synapse/replication/slave/storage/keys.py b/synapse/replication/slave/storage/keys.py index 05ed168463..8032f53fec 100644 --- a/synapse/replication/slave/storage/keys.py +++ b/synapse/replication/slave/storage/keys.py @@ -16,7 +16,7 @@ from synapse.storage import DataStore from synapse.storage.keys import KeyStore -from ._base import BaseSlavedStore +from ._base import BaseSlavedStore, __func__ class 
SlavedKeyStore(BaseSlavedStore): @@ -24,11 +24,11 @@ class SlavedKeyStore(BaseSlavedStore): "_get_server_verify_key" ] - get_server_verify_keys = DataStore.get_server_verify_keys.__func__ - store_server_verify_key = DataStore.store_server_verify_key.__func__ + get_server_verify_keys = __func__(DataStore.get_server_verify_keys) + store_server_verify_key = __func__(DataStore.store_server_verify_key) - get_server_certificate = DataStore.get_server_certificate.__func__ - store_server_certificate = DataStore.store_server_certificate.__func__ + get_server_certificate = __func__(DataStore.get_server_certificate) + store_server_certificate = __func__(DataStore.store_server_certificate) - get_server_keys_json = DataStore.get_server_keys_json.__func__ - store_server_keys_json = DataStore.store_server_keys_json.__func__ + get_server_keys_json = __func__(DataStore.get_server_keys_json) + store_server_keys_json = __func__(DataStore.store_server_keys_json) diff --git a/synapse/replication/slave/storage/presence.py b/synapse/replication/slave/storage/presence.py index 80b744082a..92447b00d4 100644 --- a/synapse/replication/slave/storage/presence.py +++ b/synapse/replication/slave/storage/presence.py @@ -17,7 +17,7 @@ from synapse.storage import DataStore from synapse.storage.presence import PresenceStore from synapse.util.caches.stream_change_cache import StreamChangeCache -from ._base import BaseSlavedStore +from ._base import BaseSlavedStore, __func__ from ._slaved_id_tracker import SlavedIdTracker @@ -34,8 +34,8 @@ class SlavedPresenceStore(BaseSlavedStore): "PresenceStreamChangeCache", self._presence_id_gen.get_current_token() ) - _get_active_presence = DataStore._get_active_presence.__func__ - take_presence_startup_info = DataStore.take_presence_startup_info.__func__ + _get_active_presence = __func__(DataStore._get_active_presence) + take_presence_startup_info = __func__(DataStore.take_presence_startup_info) _get_presence_for_user = PresenceStore.__dict__["_get_presence_for_user"] get_presence_for_users = PresenceStore.__dict__["get_presence_for_users"] diff --git a/synctl b/synctl index 356e5cb6a7..09b64459b1 100755 --- a/synctl +++ b/synctl @@ -280,7 +280,7 @@ def main(): if worker.cache_factor: os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor) - for cache_name, factor in worker.cache_factors.iteritems(): + for cache_name, factor in iteritems(worker.cache_factors): os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor) start_worker(worker.app, configfile, worker.configfile) -- cgit 1.5.1 From a45f2c3a002dd50ba4dc6dcbe45a51f2da4057b2 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 12 Oct 2018 14:33:55 +0100 Subject: missed one --- synapse/rest/client/v2_alpha/room_keys.py | 1 + 1 file changed, 1 insertion(+) (limited to 'synapse') diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 8d006d819d..45b5817d8b 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -297,6 +297,7 @@ class RoomKeysNewVersionServlet(RestServlet): # we deliberately don't have a PUT /version, as these things really should # be immutable to avoid people footgunning + class RoomKeysVersionServlet(RestServlet): PATTERNS = client_v2_patterns( "/room_keys/version(/(?P<version>[^/]+))?$" -- cgit 1.5.1 From a2bfb778c8a2a91bdff1a5824571d91f4f4536d3 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Fri, 12 Oct 2018 18:17:36 +0100 Subject: improve auto room join logic, comments and tests ---
changelog.d/3975.feature | 2 +- synapse/config/registration.py | 11 ++++++++++- synapse/handlers/register.py | 11 ++++++++--- tests/handlers/test_register.py | 21 +++++++++++++++++---- 4 files changed, 36 insertions(+), 9 deletions(-) (limited to 'synapse') diff --git a/changelog.d/3975.feature b/changelog.d/3975.feature index 5cd8ad6cca..496ba4f4a0 100644 --- a/changelog.d/3975.feature +++ b/changelog.d/3975.feature @@ -1 +1 @@ -First user should autocreate autojoin rooms +Servers with auto-join rooms should auto-create those rooms when the first user registers diff --git a/synapse/config/registration.py b/synapse/config/registration.py index dcf2374ed2..43ff20a637 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -15,6 +15,8 @@ from distutils.util import strtobool +from synapse.config._base import ConfigError +from synapse.types import RoomAlias from synapse.util.stringutils import random_string_with_symbols from ._base import Config @@ -44,6 +46,9 @@ class RegistrationConfig(Config): ) self.auto_join_rooms = config.get("auto_join_rooms", []) + for room_alias in self.auto_join_rooms: + if not RoomAlias.is_valid(room_alias): + raise ConfigError('Invalid auto_join_rooms entry %s' % room_alias) self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True) def default_config(self, **kwargs): @@ -100,7 +105,11 @@ class RegistrationConfig(Config): #auto_join_rooms: # - "#example:example.com" - # Have first user on server autocreate autojoin rooms + # Where auto_join_rooms is specified, setting this flag ensures that + # the rooms exist by creating them when the first user on the + # homeserver registers. + # Setting to false means that if the rooms are not manually created, + # users cannot be auto-joined since they do not exist.
autocreate_auto_join_rooms: true """ % locals() diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 01cf7ab58e..2b269ab692 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -26,6 +26,7 @@ from synapse.api.errors import ( RegistrationError, SynapseError, ) +from synapse.config._base import ConfigError from synapse.http.client import CaptchaServerHttpClient from synapse.types import RoomAlias, RoomID, UserID, create_requester from synapse.util.async_helpers import Linearizer @@ -222,14 +223,19 @@ class RegistrationHandler(BaseHandler): fake_requester = create_requester(user_id) # try to create the room if we're the first user on the server + should_auto_create_rooms = False if self.hs.config.autocreate_auto_join_rooms: count = yield self.store.count_all_users() - auto_create_rooms = count == 1 + should_auto_create_rooms = count == 1 for r in self.hs.config.auto_join_rooms: try: - if auto_create_rooms and RoomAlias.is_valid(r): + if should_auto_create_rooms: room_creation_handler = self.hs.get_room_creation_handler() + if self.hs.hostname != RoomAlias.from_string(r).domain: + raise ConfigError( + 'Cannot create room alias %s, it does not match server domain' % (r,) + ) # create room expects the localpart of the room alias room_alias_localpart = RoomAlias.from_string(r).localpart yield room_creation_handler.create_room( @@ -531,7 +537,6 @@ class RegistrationHandler(BaseHandler): @defer.inlineCallbacks def _join_user_to_room(self, requester, room_identifier): - room_id = None room_member_handler = self.hs.get_room_member_handler() if RoomID.is_valid(room_identifier): diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index a150a897ac..3e9a190727 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -47,7 +47,6 @@ class RegistrationTestCase(unittest.TestCase): generate_access_token=Mock(return_value='secret') ) self.hs.get_macaroon_generator = Mock(return_value=self.macaroon_generator) - # self.hs.handlers = RegistrationHandlers(self.hs) self.handler = self.hs.get_handlers().registration_handler self.store = self.hs.get_datastore() self.hs.config.max_mau_value = 50 @@ -148,9 +147,7 @@ class RegistrationTestCase(unittest.TestCase): @defer.inlineCallbacks def test_auto_create_auto_join_rooms(self): room_alias_str = "#room:test" - self.hs.config.autocreate_auto_join_rooms = True self.hs.config.auto_join_rooms = [room_alias_str] - res = yield self.handler.register(localpart='jeff') rooms = yield self.store.get_rooms_for_user(res[0]) @@ -163,11 +160,27 @@ class RegistrationTestCase(unittest.TestCase): @defer.inlineCallbacks def test_auto_create_auto_join_rooms_with_no_rooms(self): - self.hs.config.autocreate_auto_join_rooms = True self.hs.config.auto_join_rooms = [] frank = UserID.from_string("@frank:test") res = yield self.handler.register(frank.localpart) self.assertEqual(res[0], frank.to_string()) rooms = yield self.store.get_rooms_for_user(res[0]) + self.assertEqual(len(rooms), 0) + + @defer.inlineCallbacks + def test_auto_create_auto_join_where_room_is_another_domain(self): + self.hs.config.auto_join_rooms = ["#room:another"] + frank = UserID.from_string("@frank:test") + res = yield self.handler.register(frank.localpart) + self.assertEqual(res[0], frank.to_string()) + rooms = yield self.store.get_rooms_for_user(res[0]) + self.assertEqual(len(rooms), 0) + @defer.inlineCallbacks + def test_auto_create_auto_join_where_auto_create_is_false(self): + self.hs.config.autocreate_auto_join_rooms =
False + room_alias_str = "#room:test" + self.hs.config.auto_join_rooms = [room_alias_str] + res = yield self.handler.register(localpart='jeff') + rooms = yield self.store.get_rooms_for_user(res[0]) self.assertEqual(len(rooms), 0) -- cgit 1.5.1 From 7bb651de6a29e4b261b67aace1e0d5e4286b2e69 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Fri, 12 Oct 2018 14:53:30 -0600 Subject: More sane handling of group errors and pep8 --- synapse/handlers/groups_local.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 68de8a8e53..66da6edad1 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -20,7 +20,7 @@ from six import iteritems from twisted.internet import defer -from synapse.api.errors import (SynapseError, HttpResponseException) +from synapse.api.errors import (HttpResponseException, SynapseError) from synapse.types import get_domain_from_id logger = logging.getLogger(__name__) @@ -40,12 +40,13 @@ def _create_rerouter(func_name): d = getattr(self.transport_client, func_name)( destination, group_id, *args, **kwargs ) + def h(failure): failure.trap(HttpResponseException) e = failure.value - if e.code >= 400 and e.code < 500: - raise SynapseError(e.code, e.msg) - failure.raiseException() + if e.code == 403: + raise e.to_synapse_error() + return failure d.addErrback(h) return d return f -- cgit 1.5.1 From 164f8e484356f0dc8bc32f30f668d4b69b46000b Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Fri, 12 Oct 2018 15:11:59 -0600 Subject: isort --- synapse/handlers/groups_local.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 66da6edad1..cb7e7d49cb 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -20,7 +20,7 @@ from six import iteritems from twisted.internet import defer -from synapse.api.errors import (HttpResponseException, SynapseError) +from synapse.api.errors import HttpResponseException, SynapseError from synapse.types import get_domain_from_id logger = logging.getLogger(__name__) -- cgit 1.5.1 From 1ccafb0c5e3e6c9c64ce231163e7c73f05b910fa Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Sat, 13 Oct 2018 21:14:21 +0100 Subject: no need to join room if creator --- synapse/handlers/register.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 2b269ab692..2f7bdb0a20 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -246,7 +246,8 @@ class RegistrationHandler(BaseHandler): }, ratelimit=False, ) - yield self._join_user_to_room(fake_requester, r) + else: + yield self._join_user_to_room(fake_requester, r) except Exception as e: logger.error("Failed to join new user to %r: %r", r, e) -- cgit 1.5.1 From fb216a22dbceebeee59aedfc6f888b73f4492908 Mon Sep 17 00:00:00 2001 From: Ivan Shapovalov Date: Sun, 14 Oct 2018 11:37:04 +0300 Subject: synapse/visibility.py: fix SyntaxError on py3.7 --- changelog.d/4033.bugfix | 1 + synapse/visibility.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/4033.bugfix (limited to 'synapse') diff --git a/changelog.d/4033.bugfix b/changelog.d/4033.bugfix new file mode 100644 index 0000000000..8f7a0bbbe9 --- /dev/null +++ b/changelog.d/4033.bugfix @@ -0,0 +1 @@ +Synapse now starts on Python 3.7. 
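Aside: the one-line visibility.py fix below works around a grammar change in Python 3.7, which disallows a trailing comma after a bare generator expression passed as a function's sole argument. A minimal illustration (the stub below is hypothetical, not Synapse code):

    def are_users_erased(user_ids):
        # Stand-in for the real store method; just consumes the iterable.
        return set(user_ids)

    senders = ["@a:example.org", "@b:example.org"]
    # are_users_erased(s for s in senders,)   # SyntaxError on Python 3.7+
    are_users_erased((s for s in senders))    # parenthesised form parses everywhere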
diff --git a/synapse/visibility.py b/synapse/visibility.py index c64ad2144c..43f48196be 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -219,7 +219,7 @@ def filter_events_for_server(store, server_name, events): # Whatever else we do, we need to check for senders which have requested # erasure of their data. erased_senders = yield store.are_users_erased( - e.sender for e in events, + (e.sender for e in events), ) def redact_disallowed(event, state): -- cgit 1.5.1 From 06bc8d2fe551b4390cbca9838bc547c5d0021f6d Mon Sep 17 00:00:00 2001 From: Ivan Shapovalov Date: Sun, 14 Oct 2018 11:37:38 +0300 Subject: synapse/app: frontend_proxy.py: actually make workers work on py3 --- changelog.d/4033.bugfix | 1 + synapse/app/frontend_proxy.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/changelog.d/4033.bugfix b/changelog.d/4033.bugfix index 8f7a0bbbe9..4e9e38cdcf 100644 --- a/changelog.d/4033.bugfix +++ b/changelog.d/4033.bugfix @@ -1 +1,2 @@ Synapse now starts on Python 3.7. +_All_ workers now start on Python 3. diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index fc4b25de1c..f5c61dec5b 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -68,7 +68,7 @@ class PresenceStatusStubServlet(ClientV1RestServlet): "Authorization": auth_headers, } result = yield self.http_client.get_json( - self.main_uri + request.uri, + self.main_uri + request.uri.decode('ascii'), headers=headers, ) defer.returnValue((200, result)) @@ -125,7 +125,7 @@ class KeyUploadServlet(RestServlet): "Authorization": auth_headers, } result = yield self.http_client.post_json_get_json( - self.main_uri + request.uri, + self.main_uri + request.uri.decode('ascii'), body, headers=headers, ) -- cgit 1.5.1 From f726f2dc6c398eb5fdf9d2dc0c56e96d930537ba Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 15 Oct 2018 22:21:45 +1100 Subject: version bump --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/__init__.py b/synapse/__init__.py index 43c5821ade..ea07fda14a 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.33.6" +__version__ = "0.33.7rc1" -- cgit 1.5.1 From b8a5b0097c4fc88a43c5c5f9785720d38299d3d8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 16 Oct 2018 10:44:49 +0100 Subject: Various cleanups in the federation client code (#4031) - Improve logging: log things in the right order, include destination and txids in all log lines, don't log successful responses twice - Fix the docstring on TransportLayerClient.send_transaction - Don't use treq.request, which is overcomplicated for our purposes: just use a twisted.web.client.Agent. 
- simplify the logic for setting up the bodyProducer - fix bytes/str confusions --- changelog.d/4031.misc | 1 + synapse/federation/transaction_queue.py | 27 +++++------- synapse/federation/transport/client.py | 19 ++++---- synapse/http/matrixfederationclient.py | 78 +++++++++++++++++---------------- 4 files changed, 64 insertions(+), 61 deletions(-) create mode 100644 changelog.d/4031.misc (limited to 'synapse') diff --git a/changelog.d/4031.misc b/changelog.d/4031.misc new file mode 100644 index 0000000000..60be8b59fd --- /dev/null +++ b/changelog.d/4031.misc @@ -0,0 +1 @@ +Various cleanups in the federation client code diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 98b5950800..3fdd63be95 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -633,14 +633,6 @@ class TransactionQueue(object): transaction, json_data_cb ) code = 200 - - if response: - for e_id, r in response.get("pdus", {}).items(): - if "error" in r: - logger.warn( - "Transaction returned error for %s: %s", - e_id, r, - ) except HttpResponseException as e: code = e.code response = e.response @@ -657,19 +649,24 @@ class TransactionQueue(object): destination, txn_id, code ) - logger.debug("TX [%s] Sent transaction", destination) - logger.debug("TX [%s] Marking as delivered...", destination) - yield self.transaction_actions.delivered( transaction, code, response ) - logger.debug("TX [%s] Marked as delivered", destination) + logger.debug("TX [%s] {%s} Marked as delivered", destination, txn_id) - if code != 200: + if code == 200: + for e_id, r in response.get("pdus", {}).items(): + if "error" in r: + logger.warn( + "TX [%s] {%s} Remote returned error for %s: %s", + destination, txn_id, e_id, r, + ) + else: for p in pdus: - logger.info( - "Failed to send event %s to %s", p.event_id, destination + logger.warn( + "TX [%s] {%s} Failed to send event %s", + destination, txn_id, p.event_id, ) success = False diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 2ab973d6c8..edba5a9808 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -143,9 +143,17 @@ class TransportLayerClient(object): transaction (Transaction) Returns: - Deferred: Results of the deferred is a tuple in the form of - (response_code, response_body) where the response_body is a - python dict decoded from json + Deferred: Succeeds when we get a 2xx HTTP response. The result + will be the decoded JSON body. + + Fails with ``HTTPRequestException`` if we get an HTTP response + code >= 300. + + Fails with ``NotRetryingDestination`` if we are not yet ready + to retry this server. 
+ + Fails with ``FederationDeniedError`` if this destination + is not on our federation whitelist """ logger.debug( "send_data dest=%s, txid=%s", @@ -170,11 +178,6 @@ class TransportLayerClient(object): backoff_on_404=True, # If we get a 404 the other side has gone ) - logger.debug( - "send_data dest=%s, txid=%s, got response: 200", - transaction.destination, transaction.transaction_id, - ) - defer.returnValue(response) @defer.inlineCallbacks diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 14b12cd1c4..fcc02fc77d 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -195,7 +195,7 @@ class MatrixFederationHttpClient(object): ) self.clock = hs.get_clock() self._store = hs.get_datastore() - self.version_string = hs.version_string.encode('ascii') + self.version_string_bytes = hs.version_string.encode('ascii') self.default_timeout = 60 def schedule(x): @@ -261,8 +261,8 @@ class MatrixFederationHttpClient(object): ignore_backoff=ignore_backoff, ) - method = request.method - destination = request.destination + method_bytes = request.method.encode("ascii") + destination_bytes = request.destination.encode("ascii") path_bytes = request.path.encode("ascii") if request.query: query_bytes = encode_query_args(request.query) @@ -270,8 +270,8 @@ class MatrixFederationHttpClient(object): query_bytes = b"" headers_dict = { - "User-Agent": [self.version_string], - "Host": [request.destination], + b"User-Agent": [self.version_string_bytes], + b"Host": [destination_bytes], } with limiter: @@ -282,50 +282,51 @@ class MatrixFederationHttpClient(object): else: retries_left = MAX_SHORT_RETRIES - url = urllib.parse.urlunparse(( - b"matrix", destination.encode("ascii"), + url_bytes = urllib.parse.urlunparse(( + b"matrix", destination_bytes, path_bytes, None, query_bytes, b"", - )).decode('ascii') + )) + url_str = url_bytes.decode('ascii') - http_url = urllib.parse.urlunparse(( + url_to_sign_bytes = urllib.parse.urlunparse(( b"", b"", path_bytes, None, query_bytes, b"", - )).decode('ascii') + )) while True: try: json = request.get_json() if json: - data = encode_canonical_json(json) - headers_dict["Content-Type"] = ["application/json"] + headers_dict[b"Content-Type"] = [b"application/json"] self.sign_request( - destination, method, http_url, headers_dict, json + destination_bytes, method_bytes, url_to_sign_bytes, + headers_dict, json, ) - else: - data = None - self.sign_request(destination, method, http_url, headers_dict) - - logger.info( - "{%s} [%s] Sending request: %s %s", - request.txn_id, destination, method, url - ) - - if data: + data = encode_canonical_json(json) producer = FileBodyProducer( BytesIO(data), - cooperator=self._cooperator + cooperator=self._cooperator, ) else: producer = None + self.sign_request( + destination_bytes, method_bytes, url_to_sign_bytes, + headers_dict, + ) - request_deferred = treq.request( - method, - url, + logger.info( + "{%s} [%s] Sending request: %s %s", + request.txn_id, request.destination, request.method, + url_str, + ) + + # we don't want all the fancy cookie and redirect handling that + # treq.request gives: just use the raw Agent. 
+ request_deferred = self.agent.request( + method_bytes, + url_bytes, headers=Headers(headers_dict), - data=producer, - agent=self.agent, - reactor=self.hs.get_reactor(), - unbuffered=True + bodyProducer=producer, ) request_deferred = timeout_deferred( @@ -344,9 +345,9 @@ class MatrixFederationHttpClient(object): logger.warn( "{%s} [%s] Request failed: %s %s: %s", request.txn_id, - destination, - method, - url, + request.destination, + request.method, + url_str, _flatten_response_never_received(e), ) @@ -366,7 +367,7 @@ class MatrixFederationHttpClient(object): logger.debug( "{%s} [%s] Waiting %ss before re-sending...", request.txn_id, - destination, + request.destination, delay, ) @@ -378,7 +379,7 @@ class MatrixFederationHttpClient(object): logger.info( "{%s} [%s] Got response headers: %d %s", request.txn_id, - destination, + request.destination, response.code, response.phrase.decode('ascii', errors='replace'), ) @@ -411,8 +412,9 @@ class MatrixFederationHttpClient(object): destination_is must be non-None. method (bytes): The HTTP method of the request url_bytes (bytes): The URI path of the request - headers_dict (dict): Dictionary of request headers to append to - content (bytes): The body of the request + headers_dict (dict[bytes, list[bytes]]): Dictionary of request headers to + append to + content (object): The body of the request destination_is (bytes): As 'destination', but if the destination is an identity server -- cgit 1.5.1 From e238013c44714174f1bf9a2b9b1f576728f40784 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Sep 2018 15:13:17 +0100 Subject: Add v2 state res algorithm. We hook this up to the vdh test room version. --- synapse/state/__init__.py | 87 ++++++-- synapse/state/v2.py | 544 ++++++++++++++++++++++++++++++++++++++++++++++ synapse/storage/events.py | 9 +- 3 files changed, 616 insertions(+), 24 deletions(-) create mode 100644 synapse/state/v2.py (limited to 'synapse') diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index b22495c1f9..836e137296 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -19,13 +19,14 @@ from collections import namedtuple from six import iteritems, itervalues +import attr from frozendict import frozendict from twisted.internet import defer from synapse.api.constants import EventTypes, RoomVersions from synapse.events.snapshot import EventContext -from synapse.state import v1 +from synapse.state import v1, v2 from synapse.util.async_helpers import Linearizer from synapse.util.caches import get_cache_factor_for from synapse.util.caches.expiringcache import ExpiringCache @@ -372,15 +373,10 @@ class StateHandler(object): result = yield self._state_resolution_handler.resolve_state_groups( room_id, room_version, state_groups_ids, None, - self._state_map_factory, + state_res_store=StateResolutionStore(self.store), ) defer.returnValue(result) - def _state_map_factory(self, ev_ids): - return self.store.get_events( - ev_ids, get_prev_content=False, check_redacted=False, - ) - @defer.inlineCallbacks def resolve_events(self, room_version, state_sets, event): logger.info( @@ -401,7 +397,7 @@ class StateHandler(object): new_state = yield resolve_events_with_factory( room_version, state_set_ids, event_map=state_map, - state_map_factory=self._state_map_factory + state_res_store=StateResolutionStore(self.store), ) new_state = { @@ -436,7 +432,7 @@ class StateResolutionHandler(object): @defer.inlineCallbacks @log_function def resolve_state_groups( - self, room_id, room_version, state_groups_ids, event_map, 
state_map_factory, + self, room_id, room_version, state_groups_ids, event_map, state_res_store, ): """Resolves conflicts between a set of state groups @@ -454,9 +450,11 @@ class StateResolutionHandler(object): a dict from event_id to event, for any events that we happen to have in flight (eg, those currently being persisted). This will be used as a starting point for finding the state we need; any missing - events will be requested via state_map_factory. + events will be requested via state_res_store. + + If None, all events will be fetched via state_res_store. - If None, all events will be fetched via state_map_factory. + state_res_store (StateResolutionStore) Returns: Deferred[_StateCacheEntry]: resolved state @@ -502,7 +500,7 @@ class StateResolutionHandler(object): room_version, list(itervalues(state_groups_ids)), event_map=event_map, - state_map_factory=state_map_factory, + state_res_store=state_res_store, ) # if the new state matches any of the input state groups, we can @@ -583,7 +581,7 @@ def _make_state_cache_entry( ) -def resolve_events_with_factory(room_version, state_sets, event_map, state_map_factory): +def resolve_events_with_factory(room_version, state_sets, event_map, state_res_store): """ Args: room_version(str): Version of the room @@ -599,17 +597,19 @@ def resolve_events_with_factory(room_version, state_sets, event_map, state_map_f If None, all events will be fetched via state_map_factory. - state_map_factory(func): will be called - with a list of event_ids that are needed, and should return with - a Deferred of dict of event_id to event. + state_res_store (StateResolutionStore) Returns Deferred[dict[(str, str), str]]: a map from (type, state_key) to event_id. """ - if room_version in (RoomVersions.V1, RoomVersions.VDH_TEST,): + if room_version == RoomVersions.V1: return v1.resolve_events_with_factory( - state_sets, event_map, state_map_factory, + state_sets, event_map, state_res_store.get_events, + ) + elif room_version == RoomVersions.VDH_TEST: + return v2.resolve_events_with_factory( + state_sets, event_map, state_res_store, ) else: # This should only happen if we added a version but forgot to add it to + raise Exception( "No state resolution algorithm defined for version %r" % (room_version,) ) + + +@attr.s +class StateResolutionStore(object): + """Interface that allows state resolution algorithms to access the database + in a well-defined way. + + Args: + store (DataStore) + """ + + store = attr.ib() + + def get_events(self, event_ids, allow_rejected=False): + """Get events from the database + + Args: + event_ids (list): The event_ids of the events to fetch + allow_rejected (bool): If True return rejected events. + + Returns: + Deferred[dict[str, FrozenEvent]]: Dict from event_id to event. + """ + + return self.store.get_events( + event_ids, + check_redacted=False, + get_prev_content=False, + allow_rejected=allow_rejected, + ) + + def get_auth_chain(self, event_ids): + """Gets the full auth chain for a set of events (including rejected + events). + + Includes the given event IDs in the result. + + Note that: + 1. All events must be state events. + 2. For v1 rooms this may not have the full auth chain in the + presence of rejected events + + Args: + event_ids (list): The event IDs of the events to fetch the auth + chain for. Must be state events. + + Returns: + Deferred[list[str]]: List of event IDs of the auth chain.
+ """ + + return self.store.get_auth_chain_ids(event_ids, include_given=True) diff --git a/synapse/state/v2.py b/synapse/state/v2.py new file mode 100644 index 0000000000..d034b4f38e --- /dev/null +++ b/synapse/state/v2.py @@ -0,0 +1,544 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import heapq +import itertools +import logging + +from six import iteritems, itervalues + +from twisted.internet import defer + +from synapse import event_auth +from synapse.api.constants import EventTypes +from synapse.api.errors import AuthError + +logger = logging.getLogger(__name__) + + +@defer.inlineCallbacks +def resolve_events_with_factory(state_sets, event_map, state_res_store): + """Resolves the state using the v2 state resolution algorithm + + Args: + state_sets(list): List of dicts of (type, state_key) -> event_id, + which are the different state groups to resolve. + + event_map(dict[str,FrozenEvent]|None): + a dict from event_id to event, for any events that we happen to + have in flight (eg, those currently being persisted). This will be + used as a starting point fof finding the state we need; any missing + events will be requested via state_res_store. + + If None, all events will be fetched via state_res_store. + + state_res_store (StateResolutionStore) + + Returns + Deferred[dict[(str, str), str]]: + a map from (type, state_key) to event_id. + """ + + logger.debug("Computing conflicted state") + + # First split up the un/conflicted state + unconflicted_state, conflicted_state = _seperate(state_sets) + + if not conflicted_state: + defer.returnValue(unconflicted_state) + + logger.debug("%d conflicted state entries", len(conflicted_state)) + logger.debug("Calculating auth chain difference") + + # Also fetch all auth events that appear in only some of the state sets' + # auth chains. + auth_diff = yield _get_auth_chain_difference( + state_sets, event_map, state_res_store, + ) + + full_conflicted_set = set(itertools.chain( + itertools.chain.from_iterable(itervalues(conflicted_state)), + auth_diff, + )) + + events = yield state_res_store.get_events([ + eid for eid in full_conflicted_set + if eid not in event_map + ], allow_rejected=True) + event_map.update(events) + + full_conflicted_set = set(eid for eid in full_conflicted_set if eid in event_map) + + logger.debug("%d full_conflicted_set entries", len(full_conflicted_set)) + + # Get and sort all the power events (kicks/bans/etc) + power_events = ( + eid for eid in full_conflicted_set + if _is_power_event(event_map[eid]) + ) + + sorted_power_events = yield _reverse_topological_power_sort( + power_events, + event_map, + state_res_store, + full_conflicted_set, + ) + + logger.debug("sorted %d power events", len(sorted_power_events)) + + # Now sequentially auth each one + resolved_state = yield _iterative_auth_checks( + sorted_power_events, unconflicted_state, event_map, + state_res_store, + ) + + logger.debug("resolved power events") + + # OK, so we've now resolved the power events. 
Now sort the remaining + # events using the mainline of the resolved power level. + + leftover_events = [ + ev_id + for ev_id in full_conflicted_set + if ev_id not in sorted_power_events + ] + + logger.debug("sorting %d remaining events", len(leftover_events)) + + pl = resolved_state.get((EventTypes.PowerLevels, ""), None) + leftover_events = yield _mainline_sort( + leftover_events, pl, event_map, state_res_store, + ) + + logger.debug("resolving remaining events") + + resolved_state = yield _iterative_auth_checks( + leftover_events, resolved_state, event_map, + state_res_store, + ) + + logger.debug("resolved") + + # We make sure that unconflicted state always still applies. + resolved_state.update(unconflicted_state) + + logger.debug("done") + + defer.returnValue(resolved_state) + + +@defer.inlineCallbacks +def _get_power_level_for_sender(event_id, event_map, state_res_store): + """Return the power level of the sender of the given event according to + their auth events. + + Args: + event_id (str) + event_map (dict[str,FrozenEvent]) + state_res_store (StateResolutionStore) + + Returns: + Deferred[int] + """ + event = yield _get_event(event_id, event_map, state_res_store) + + pl = None + for aid, _ in event.auth_events: + aev = yield _get_event(aid, event_map, state_res_store) + if (aev.type, aev.state_key) == (EventTypes.PowerLevels, ""): + pl = aev + break + + if pl is None: + # Couldn't find power level. Check if they're the creator of the room + for aid, _ in event.auth_events: + aev = yield _get_event(aid, event_map, state_res_store) + if (aev.type, aev.state_key) == (EventTypes.Create, ""): + if aev.content.get("creator") == event.sender: + defer.returnValue(100) + break + defer.returnValue(0) + + level = pl.content.get("users", {}).get(event.sender) + if level is None: + level = pl.content.get("users_default", 0) + + if level is None: + defer.returnValue(0) + else: + defer.returnValue(int(level)) + + +@defer.inlineCallbacks +def _get_auth_chain_difference(state_sets, event_map, state_res_store): + """Compare the auth chains of each state set and return the set of events + that only appear in some but not all of the auth chains. + + Args: + state_sets (list) + event_map (dict[str,FrozenEvent]) + state_res_store (StateResolutionStore) + + Returns: + Deferred[set[str]]: Set of event IDs + """ + common = set(itervalues(state_sets[0])).intersection( + *(itervalues(s) for s in state_sets[1:]) + ) + + auth_sets = [] + for state_set in state_sets: + auth_ids = set( + eid + for key, eid in iteritems(state_set) + if (key[0] in ( + EventTypes.Member, + EventTypes.ThirdPartyInvite, + ) or key in ( + (EventTypes.PowerLevels, ''), + (EventTypes.Create, ''), + (EventTypes.JoinRules, ''), + )) and eid not in common + ) + + auth_chain = yield state_res_store.get_auth_chain(auth_ids) + auth_ids.update(auth_chain) + + auth_sets.append(auth_ids) + + intersection = set(auth_sets[0]).intersection(*auth_sets[1:]) + union = set().union(*auth_sets) + + defer.returnValue(union - intersection) + + +def _seperate(state_sets): + """Return the unconflicted and conflicted state. This is different than in + the original algorithm, as this defines a key to be conflicted if one of + the state sets doesn't have that key. + + Args: + state_sets (list) + + Returns: + tuple[dict, dict]: A tuple of unconflicted and conflicted state. 
The + conflicted state dict is a map from type/state_key to set of event IDs + """ + unconflicted_state = {} + conflicted_state = {} + + for key in set(itertools.chain.from_iterable(state_sets)): + event_ids = set(state_set.get(key) for state_set in state_sets) + if len(event_ids) == 1: + unconflicted_state[key] = event_ids.pop() + else: + event_ids.discard(None) + conflicted_state[key] = event_ids + + return unconflicted_state, conflicted_state + + +def _is_power_event(event): + """Return whether or not the event is a "power event", as defined by the + v2 state resolution algorithm + + Args: + event (FrozenEvent) + + Returns: + boolean + """ + if (event.type, event.state_key) in ( + (EventTypes.PowerLevels, ""), + (EventTypes.JoinRules, ""), + (EventTypes.Create, ""), + ): + return True + + if event.type == EventTypes.Member: + if event.membership in ('leave', 'ban'): + return event.sender != event.state_key + + return False + + +@defer.inlineCallbacks +def _add_event_and_auth_chain_to_graph(graph, event_id, event_map, + state_res_store, auth_diff): + """Helper function for _reverse_topological_power_sort that adds the event + and its auth chain (that is in the auth diff) to the graph + + Args: + graph (dict[str, set[str]]): A map from event ID to the event's auth + event IDs + event_id (str): Event to add to the graph + event_map (dict[str,FrozenEvent]) + state_res_store (StateResolutionStore) + auth_diff (set[str]): Set of event IDs that are in the auth difference. + """ + + state = [event_id] + while state: + eid = state.pop() + graph.setdefault(eid, set()) + + event = yield _get_event(eid, event_map, state_res_store) + for aid, _ in event.auth_events: + if aid in auth_diff: + if aid not in graph: + state.append(aid) + + graph.setdefault(eid, set()).add(aid) + + +@defer.inlineCallbacks +def _reverse_topological_power_sort(event_ids, event_map, state_res_store, auth_diff): + """Returns a list of the event_ids sorted by reverse topological ordering, + and then by power level and origin_server_ts + + Args: + event_ids (list[str]): The events to sort + event_map (dict[str,FrozenEvent]) + state_res_store (StateResolutionStore) + auth_diff (set[str]): Set of event IDs that are in the auth difference. + + Returns: + Deferred[list[str]]: The sorted list + """ + + graph = {} + for event_id in event_ids: + yield _add_event_and_auth_chain_to_graph( + graph, event_id, event_map, state_res_store, auth_diff, + ) + + event_to_pl = {} + for event_id in graph: + pl = yield _get_power_level_for_sender(event_id, event_map, state_res_store) + event_to_pl[event_id] = pl + + def _get_power_order(event_id): + ev = event_map[event_id] + pl = event_to_pl[event_id] + + return -pl, ev.origin_server_ts, event_id + + # Note: graph is modified during the sort + it = lexicographical_topological_sort( + graph, + key=_get_power_order, + ) + sorted_events = list(it) + + defer.returnValue(sorted_events) + + +@defer.inlineCallbacks +def _iterative_auth_checks(event_ids, base_state, event_map, state_res_store): + """Sequentially apply auth checks to each event in the given list, updating the + state as it goes along.
+ + Args: + event_ids (list[str]): Ordered list of events to apply auth checks to + base_state (dict[tuple[str, str], str]): The set of state to start with + event_map (dict[str,FrozenEvent]) + state_res_store (StateResolutionStore) + + Returns: + Deferred[dict[tuple[str, str], str]]: Returns the final updated state + """ + resolved_state = base_state.copy() + + for event_id in event_ids: + event = event_map[event_id] + + auth_events = {} + for aid, _ in event.auth_events: + ev = yield _get_event(aid, event_map, state_res_store) + + if ev.rejected_reason is None: + auth_events[(ev.type, ev.state_key)] = ev + + for key in event_auth.auth_types_for_event(event): + if key in resolved_state: + ev_id = resolved_state[key] + ev = yield _get_event(ev_id, event_map, state_res_store) + + if ev.rejected_reason is None: + auth_events[key] = event_map[ev_id] + + try: + event_auth.check( + event, auth_events, + do_sig_check=False, + do_size_check=False + ) + + resolved_state[(event.type, event.state_key)] = event_id + except AuthError: + pass + + defer.returnValue(resolved_state) + + +@defer.inlineCallbacks +def _mainline_sort(event_ids, resolved_power_event_id, event_map, + state_res_store): + """Returns a sorted list of event_ids sorted by mainline ordering based on + the given event resolved_power_event_id + + Args: + event_ids (list[str]): Events to sort + resolved_power_event_id (str): The final resolved power level event ID + event_map (dict[str,FrozenEvent]) + state_res_store (StateResolutionStore) + + Returns: + Deferred[list[str]]: The sorted list + """ + mainline = [] + pl = resolved_power_event_id + while pl: + mainline.append(pl) + pl_ev = yield _get_event(pl, event_map, state_res_store) + auth_events = pl_ev.auth_events + pl = None + for aid, _ in auth_events: + ev = yield _get_event(aid, event_map, state_res_store) + if (ev.type, ev.state_key) == (EventTypes.PowerLevels, ""): + pl = aid + break + + mainline_map = {ev_id: i + 1 for i, ev_id in enumerate(reversed(mainline))} + + event_ids = list(event_ids) + + order_map = {} + for ev_id in event_ids: + depth = yield _get_mainline_depth_for_event( + event_map[ev_id], mainline_map, + event_map, state_res_store, + ) + order_map[ev_id] = (depth, event_map[ev_id].origin_server_ts, ev_id) + + event_ids.sort(key=lambda ev_id: order_map[ev_id]) + + defer.returnValue(event_ids) + + +@defer.inlineCallbacks +def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_store): + """Get the mainline depths for the given event based on the mainline map + + Args: + event (FrozenEvent) + mainline_map (dict[str, int]): Map from event_id to mainline depth for + events in the mainline. 
+ event_map (dict[str,FrozenEvent]) + state_res_store (StateResolutionStore) + + Returns: + Deferred[int] + """ + + # We do an iterative search, replacing `event` with the power level in its + # auth events (if any) + while event: + depth = mainline_map.get(event.event_id) + if depth is not None: + defer.returnValue(depth) + + auth_events = event.auth_events + event = None + + for aid, _ in auth_events: + aev = yield _get_event(aid, event_map, state_res_store) + if (aev.type, aev.state_key) == (EventTypes.PowerLevels, ""): + event = aev + break + + # Didn't find a power level auth event, so we just return 0 + defer.returnValue(0) + + +@defer.inlineCallbacks +def _get_event(event_id, event_map, state_res_store): + """Helper function to look up event in event_map, falling back to looking + it up in the store + + Args: + event_id (str) + event_map (dict[str,FrozenEvent]) + state_res_store (StateResolutionStore) + + Returns: + Deferred[FrozenEvent] + """ + if event_id not in event_map: + events = yield state_res_store.get_events([event_id], allow_rejected=True) + event_map.update(events) + defer.returnValue(event_map[event_id]) + + +def lexicographical_topological_sort(graph, key): + """Performs a lexicographic reverse topological sort on the graph. + + This returns a reverse topological sort (i.e. if node A references B then B + appears before A in the sort), with ties broken lexicographically based on + return value of the `key` function. + + NOTE: `graph` is modified during the sort. + + Args: + graph (dict[str, set[str]]): A representation of the graph where each + node is a key in the dict and its value is the node's edges. + key (func): A function that takes a node and returns a value that is + comparable and used to order nodes + + Yields: + str: The next node in the topological sort + """ + + # Note, this is basically Kahn's algorithm except we look at nodes with no + # outgoing edges, c.f. + # https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm + outdegree_map = graph + reverse_graph = {} + + # List of nodes with zero out degree. Each entry is actually a tuple of + # `(key(node), node)` so that sorting does the right thing + zero_outdegree = [] + + for node, edges in iteritems(graph): + if len(edges) == 0: + zero_outdegree.append((key(node), node)) + + reverse_graph.setdefault(node, set()) + for edge in edges: + reverse_graph.setdefault(edge, set()).add(node) + + # heapq is a built-in implementation of a sorted queue.
+ heapq.heapify(zero_outdegree) + + while zero_outdegree: + _, node = heapq.heappop(zero_outdegree) + + for parent in reverse_graph[node]: + out = outdegree_map[parent] + out.discard(node) + if len(out) == 0: + heapq.heappush(zero_outdegree, (key(parent), parent)) + + yield node diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 03cedf3a75..fc88edcb39 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -34,6 +34,7 @@ from synapse.api.errors import SynapseError from synapse.events import EventBase # noqa: F401 from synapse.events.snapshot import EventContext # noqa: F401 from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.state import StateResolutionStore from synapse.storage.background_updates import BackgroundUpdateStore from synapse.storage.event_federation import EventFederationStore from synapse.storage.events_worker import EventsWorkerStore @@ -731,11 +732,6 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore # Ok, we need to defer to the state handler to resolve our state sets. - def get_events(ev_ids): - return self.get_events( - ev_ids, get_prev_content=False, check_redacted=False, - ) - state_groups = { sg: state_groups_map[sg] for sg in new_state_groups } @@ -745,7 +741,8 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore logger.debug("calling resolve_state_groups from preserve_events") res = yield self._state_resolution_handler.resolve_state_groups( - room_id, room_version, state_groups, events_map, get_events + room_id, room_version, state_groups, events_map, + state_res_store=StateResolutionStore(self) ) defer.returnValue((res.state, None)) -- cgit 1.5.1 From 6bd856caa29a022a537ccd415bae8c286baa6881 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 16 Oct 2018 16:15:51 +0100 Subject: Use event.sender rather than alias event.user_id --- synapse/event_auth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/event_auth.py b/synapse/event_auth.py index af3eee95b9..d4d4474847 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -690,7 +690,7 @@ def auth_types_for_event(event): auth_types = [] auth_types.append((EventTypes.PowerLevels, "", )) - auth_types.append((EventTypes.Member, event.user_id, )) + auth_types.append((EventTypes.Member, event.sender, )) auth_types.append((EventTypes.Create, "", )) if event.type == EventTypes.Member: -- cgit 1.5.1 From 15133477ee6ea624dd937329c41d2d3c8ff2bd56 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 16 Oct 2018 13:58:37 +0100 Subject: Fix up use of resolve_events_with_factory --- synapse/handlers/federation.py | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 45d955e6f5..b028d58ae4 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -53,7 +53,7 @@ from synapse.replication.http.federation import ( ReplicationFederationSendEventsRestServlet, ) from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet -from synapse.state import resolve_events_with_factory +from synapse.state import StateResolutionStore, resolve_events_with_factory from synapse.types import UserID, get_domain_from_id from synapse.util import logcontext, unwrapFirstError from synapse.util.async_helpers import Linearizer @@ -384,21 +384,18 @@ class
         for x in remote_state:
             event_map[x.event_id] = x

-        # Resolve any conflicting state
-        @defer.inlineCallbacks
-        def fetch(ev_ids):
-            fetched = yield self.store.get_events(
-                ev_ids, get_prev_content=False, check_redacted=False,
-            )
-            # add any events we fetch here to the `event_map` so that we
-            # can use them to build the state event list below.
-            event_map.update(fetched)
-            defer.returnValue(fetched)
-
         room_version = yield self.store.get_room_version(room_id)
         state_map = yield resolve_events_with_factory(
-            room_version, state_maps, event_map, fetch,
+            room_version, state_maps, event_map,
+            state_res_store=StateResolutionStore(self.store),
+        )
+
+        evs = yield self.store.get_events(
+            list(state_map.values()),
+            get_prev_content=False,
+            check_redacted=False,
         )
+        event_map.update(evs)

         # we need to give _process_received_pdu the actual state events
         # rather than event ids, so generate that now.
-- cgit 1.5.1

From 4a28d3d36fa21446e60a2a5142626a385e1f27af Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 16 Oct 2018 14:01:53 +0100
Subject: Update event_auth table for rejected events

---
 synapse/storage/events.py | 36 +++++++++++++++++++++---------------
 1 file changed, 21 insertions(+), 15 deletions(-)

(limited to 'synapse')

diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index fc88edcb39..c780f55277 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -851,6 +851,27 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
         # Insert into event_to_state_groups.
         self._store_event_state_mappings_txn(txn, events_and_contexts)

+        # We want to store event_auth mappings for rejected events, as they're
+        # used in state res v2.
+        # This is only necessary if the rejected event appears in an accepted
+        # event's auth chain, but it's easier for now just to store them (and
+        # it doesn't take much storage compared to storing the entire event
+        # anyway).
+        self._simple_insert_many_txn(
+            txn,
+            table="event_auth",
+            values=[
+                {
+                    "event_id": event.event_id,
+                    "room_id": event.room_id,
+                    "auth_id": auth_id,
+                }
+                for event, _ in events_and_contexts
+                for auth_id, _ in event.auth_events
+                if event.is_state()
+            ],
+        )
+
         # _store_rejected_events_txn filters out any events which were
         # rejected, and returns the filtered list.
         events_and_contexts = self._store_rejected_events_txn(
@@ -1326,21 +1347,6 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
                 txn, event.room_id, event.redacts
             )

-        self._simple_insert_many_txn(
-            txn,
-            table="event_auth",
-            values=[
-                {
-                    "event_id": event.event_id,
-                    "room_id": event.room_id,
-                    "auth_id": auth_id,
-                }
-                for event, _ in events_and_contexts
-                for auth_id, _ in event.auth_events
-                if event.is_state()
-            ],
-        )
-
         # Update the event_forward_extremities, event_backward_extremities and
         # event_edges tables.
         self._handle_mult_prev_events(
-- cgit 1.5.1

From fc0f13dd036cec4e41f5969d021d9dd10d6e5016 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Tue, 16 Oct 2018 20:37:16 +0100
Subject: Fix incorrect truncation in get_missing_events

It's quite important that get_missing_events returns the *latest* events
in the room; however we were pulling event ids out of the database until
we got *at least* 10, and then taking the *earliest* of the results.

We also shouldn't really be relying on depth, and should be checking the
room_id.
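As a rough sketch of the corrected traversal (the function name and the
in-memory `prev_edges` map below are illustrative stand-ins for the real
`event_edges` table, not Synapse's actual storage API), the walk starts from
`latest_events`, stops as soon as `limit` events have been collected, and
reverses the result at the end:

    def get_missing_events_sketch(prev_edges, earliest_events, latest_events, limit):
        seen_events = set(earliest_events)
        front = set(latest_events) - seen_events
        event_results = []
        while front and len(event_results) < limit:
            new_front = set()
            for event_id in front:
                # stand-in for the per-event SQL query, which caps each
                # batch at the remaining budget
                budget = limit - len(event_results)
                new_results = set(list(prev_edges.get(event_id, []))[:budget]) - seen_events
                new_front |= new_results
                seen_events |= new_results
                event_results.extend(new_results)
            front = new_front
        # built backwards from latest_events, so reverse to get an
        # approximately chronological order
        event_results.reverse()
        return event_results

Because collection now stops once `limit` is reached while walking backwards
from the latest events, it is the earliest candidates that get dropped rather
than the latest ones, which is what the commit below fixes.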
--- changelog.d/4045.bugfix | 1 + synapse/federation/federation_server.py | 8 +++---- synapse/federation/transport/server.py | 2 -- synapse/handlers/federation.py | 12 +++++------ synapse/storage/event_federation.py | 38 ++++++++++++++------------------- 5 files changed, 26 insertions(+), 35 deletions(-) create mode 100644 changelog.d/4045.bugfix (limited to 'synapse') diff --git a/changelog.d/4045.bugfix b/changelog.d/4045.bugfix new file mode 100644 index 0000000000..fa50eb5aff --- /dev/null +++ b/changelog.d/4045.bugfix @@ -0,0 +1 @@ +Fix bug which made get_missing_events return too few events \ No newline at end of file diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 819e8f7331..4efe95faa4 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -507,19 +507,19 @@ class FederationServer(FederationBase): @defer.inlineCallbacks @log_function def on_get_missing_events(self, origin, room_id, earliest_events, - latest_events, limit, min_depth): + latest_events, limit): with (yield self._server_linearizer.queue((origin, room_id))): origin_host, _ = parse_server_name(origin) yield self.check_server_matches_acl(origin_host, room_id) logger.info( "on_get_missing_events: earliest_events: %r, latest_events: %r," - " limit: %d, min_depth: %d", - earliest_events, latest_events, limit, min_depth + " limit: %d", + earliest_events, latest_events, limit, ) missing_events = yield self.handler.on_get_missing_events( - origin, room_id, earliest_events, latest_events, limit, min_depth + origin, room_id, earliest_events, latest_events, limit, ) if len(missing_events) < 5: diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 2f874b4838..7288d49074 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -560,7 +560,6 @@ class FederationGetMissingEventsServlet(BaseFederationServlet): @defer.inlineCallbacks def on_POST(self, origin, content, query, room_id): limit = int(content.get("limit", 10)) - min_depth = int(content.get("min_depth", 0)) earliest_events = content.get("earliest_events", []) latest_events = content.get("latest_events", []) @@ -569,7 +568,6 @@ class FederationGetMissingEventsServlet(BaseFederationServlet): room_id=room_id, earliest_events=earliest_events, latest_events=latest_events, - min_depth=min_depth, limit=limit, ) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 45d955e6f5..cab57a8849 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -309,8 +309,8 @@ class FederationHandler(BaseHandler): if sent_to_us_directly: logger.warn( - "[%s %s] Failed to fetch %d prev events: rejecting", - room_id, event_id, len(prevs - seen), + "[%s %s] Rejecting: failed to fetch %d prev events: %s", + room_id, event_id, len(prevs - seen), shortstr(prevs - seen) ) raise FederationError( "ERROR", @@ -452,8 +452,8 @@ class FederationHandler(BaseHandler): latest |= seen logger.info( - "[%s %s]: Requesting %d prev_events: %s", - room_id, event_id, len(prevs - seen), shortstr(prevs - seen) + "[%s %s]: Requesting missing events between %s and %s", + room_id, event_id, shortstr(latest), event_id, ) # XXX: we set timeout to 10s to help workaround @@ -1852,7 +1852,7 @@ class FederationHandler(BaseHandler): @defer.inlineCallbacks def on_get_missing_events(self, origin, room_id, earliest_events, - latest_events, limit, min_depth): + latest_events, limit): in_room = yield 
self.auth.check_host_in_room( room_id, origin @@ -1861,14 +1861,12 @@ class FederationHandler(BaseHandler): raise AuthError(403, "Host not in room.") limit = min(limit, 20) - min_depth = max(min_depth, 0) missing_events = yield self.store.get_missing_events( room_id=room_id, earliest_events=earliest_events, latest_events=latest_events, limit=limit, - min_depth=min_depth, ) missing_events = yield filter_events_for_server( diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index 24345b20a6..3faca2a042 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -376,33 +376,25 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, @defer.inlineCallbacks def get_missing_events(self, room_id, earliest_events, latest_events, - limit, min_depth): + limit): ids = yield self.runInteraction( "get_missing_events", self._get_missing_events, - room_id, earliest_events, latest_events, limit, min_depth + room_id, earliest_events, latest_events, limit, ) - events = yield self._get_events(ids) - - events = sorted( - [ev for ev in events if ev.depth >= min_depth], - key=lambda e: e.depth, - ) - - defer.returnValue(events[:limit]) + defer.returnValue(events) def _get_missing_events(self, txn, room_id, earliest_events, latest_events, - limit, min_depth): - - earliest_events = set(earliest_events) - front = set(latest_events) - earliest_events + limit): - event_results = set() + seen_events = set(earliest_events) + front = set(latest_events) - seen_events + event_results = [] query = ( "SELECT prev_event_id FROM event_edges " - "WHERE event_id = ? AND is_state = ? " + "WHERE room_id = ? AND event_id = ? AND is_state = ? " "LIMIT ?" ) @@ -411,18 +403,20 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, for event_id in front: txn.execute( query, - (event_id, False, limit - len(event_results)) + (room_id, event_id, False, limit - len(event_results)) ) - for e_id, in txn: - new_front.add(e_id) + new_results = set(t[0] for t in txn) - seen_events - new_front -= earliest_events - new_front -= event_results + new_front |= new_results + seen_events |= new_results + event_results.extend(new_results) front = new_front - event_results |= new_front + # we built the list working backwards from latest_events; we now need to + # reverse it so that the events are approximately chronological. 
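+        # For example, walking back from F over E, then D, then C collects
+        # [E, D, C]; the reverse() below hands the caller [C, D, E], oldest
+        # first.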
+ event_results.reverse() return event_results -- cgit 1.5.1 From c6584f4b5f9cc495478e03e01f85fd2399cf6f8d Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 17 Oct 2018 11:36:41 +0100 Subject: clean up config error logic and imports --- changelog.d/3975.feature | 2 +- synapse/config/registration.py | 9 ++++----- synapse/handlers/register.py | 30 ++++++++++++++++-------------- 3 files changed, 21 insertions(+), 20 deletions(-) (limited to 'synapse') diff --git a/changelog.d/3975.feature b/changelog.d/3975.feature index 496ba4f4a0..79c2711fb4 100644 --- a/changelog.d/3975.feature +++ b/changelog.d/3975.feature @@ -1 +1 @@ -Servers with auto join rooms, should autocreate those rooms when first user registers +Servers with auto-join rooms, should automatically create those rooms when first user registers diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 43ff20a637..4b9bf6f2d1 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -15,11 +15,10 @@ from distutils.util import strtobool -from synapse.config._base import ConfigError from synapse.types import RoomAlias from synapse.util.stringutils import random_string_with_symbols -from ._base import Config +from ._base import Config, ConfigError class RegistrationConfig(Config): @@ -48,7 +47,7 @@ class RegistrationConfig(Config): self.auto_join_rooms = config.get("auto_join_rooms", []) for room_alias in self.auto_join_rooms: if not RoomAlias.is_valid(room_alias): - raise ConfigError('Invalid auto_join_rooms entry %s' % room_alias) + raise ConfigError('Invalid auto_join_rooms entry %s' % (room_alias,)) self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True) def default_config(self, **kwargs): @@ -106,10 +105,10 @@ class RegistrationConfig(Config): # - "#example:example.com" # Where auto_join_rooms are specified, setting this flag ensures that the - # the rooms exists by creating them when the first user on the + # the rooms exist by creating them when the first user on the # homeserver registers. # Setting to false means that if the rooms are not manually created, - # users cannot be auto joined since they do not exist. + # users cannot be auto-joined since they do not exist. 
autocreate_auto_join_rooms: true """ % locals() diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 2f7bdb0a20..1b5873c8d7 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -26,7 +26,6 @@ from synapse.api.errors import ( RegistrationError, SynapseError, ) -from synapse.config._base import ConfigError from synapse.http.client import CaptchaServerHttpClient from synapse.types import RoomAlias, RoomID, UserID, create_requester from synapse.util.async_helpers import Linearizer @@ -51,6 +50,7 @@ class RegistrationHandler(BaseHandler): self._auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() + self.room_creation_handler = self.hs.get_room_creation_handler() self.captcha_client = CaptchaServerHttpClient(hs) self._next_generated_user_id = None @@ -231,21 +231,23 @@ class RegistrationHandler(BaseHandler): for r in self.hs.config.auto_join_rooms: try: if should_auto_create_rooms: - room_creation_handler = self.hs.get_room_creation_handler() if self.hs.hostname != RoomAlias.from_string(r).domain: - raise ConfigError( - 'Cannot create room alias %s, it does not match server domain' + logger.warn( + 'Cannot create room alias %s, ' + 'it does not match server domain' % (r,) + ) + raise SynapseError() + else: + # create room expects the localpart of the room alias + room_alias_localpart = RoomAlias.from_string(r).localpart + yield self.room_creation_handler.create_room( + fake_requester, + config={ + "preset": "public_chat", + "room_alias_name": room_alias_localpart + }, + ratelimit=False, ) - # create room expects the localpart of the room alias - room_alias_localpart = RoomAlias.from_string(r).localpart - yield room_creation_handler.create_room( - fake_requester, - config={ - "preset": "public_chat", - "room_alias_name": room_alias_localpart - }, - ratelimit=False, - ) else: yield self._join_user_to_room(fake_requester, r) except Exception as e: -- cgit 1.5.1 From d6a7797dd1d76a86e6914c2ae562fa83eee4c57f Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Wed, 17 Oct 2018 13:04:55 +0100 Subject: Fix roomlist since tokens on Python 3 (#4046) Thanks @Half-Shot !!! --- changelog.d/4046.bugfix | 1 + synapse/handlers/room_list.py | 11 +++++++++-- synapse/python_dependencies.py | 2 +- tests/handlers/test_roomlist.py | 39 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 50 insertions(+), 3 deletions(-) create mode 100644 changelog.d/4046.bugfix create mode 100644 tests/handlers/test_roomlist.py (limited to 'synapse') diff --git a/changelog.d/4046.bugfix b/changelog.d/4046.bugfix new file mode 100644 index 0000000000..5046dd1ce3 --- /dev/null +++ b/changelog.d/4046.bugfix @@ -0,0 +1 @@ +Fix issue where Python 3 users couldn't paginate /publicRooms diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 38e1737ec9..dc88620885 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -16,7 +16,7 @@ import logging from collections import namedtuple -from six import iteritems +from six import PY3, iteritems from six.moves import range import msgpack @@ -444,9 +444,16 @@ class RoomListNextBatch(namedtuple("RoomListNextBatch", ( @classmethod def from_token(cls, token): + if PY3: + # The argument raw=False is only available on new versions of + # msgpack, and only really needed on Python 3. Gate it behind + # a PY3 check to avoid causing issues on Debian-packaged versions. 
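+            # (raw=False makes msgpack return text keys rather than bytes,
+            # so the REVERSE_KEY_DICT lookup below keeps working on
+            # Python 3.)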
+ decoded = msgpack.loads(decode_base64(token), raw=False) + else: + decoded = msgpack.loads(decode_base64(token)) return RoomListNextBatch(**{ cls.REVERSE_KEY_DICT[key]: val - for key, val in msgpack.loads(decode_base64(token)).items() + for key, val in decoded.items() }) def to_token(self): diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 2947f37f1a..f51184b50d 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -55,7 +55,7 @@ REQUIREMENTS = { "sortedcontainers>=1.4.4": ["sortedcontainers"], "pysaml2>=3.0.0": ["saml2"], "pymacaroons-pynacl>=0.9.3": ["pymacaroons"], - "msgpack-python>=0.3.0": ["msgpack"], + "msgpack-python>=0.4.2": ["msgpack"], "phonenumbers>=8.2.0": ["phonenumbers"], "six>=1.10": ["six"], diff --git a/tests/handlers/test_roomlist.py b/tests/handlers/test_roomlist.py new file mode 100644 index 0000000000..61eebb6985 --- /dev/null +++ b/tests/handlers/test_roomlist.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.handlers.room_list import RoomListNextBatch + +import tests.unittest +import tests.utils + + +class RoomListTestCase(tests.unittest.TestCase): + """ Tests RoomList's RoomListNextBatch. """ + + def setUp(self): + pass + + def test_check_read_batch_tokens(self): + batch_token = RoomListNextBatch( + stream_ordering="abcdef", + public_room_stream_id="123", + current_limit=20, + direction_is_forward=True, + ).to_token() + next_batch = RoomListNextBatch.from_token(batch_token) + self.assertEquals(next_batch.stream_ordering, "abcdef") + self.assertEquals(next_batch.public_room_stream_id, "123") + self.assertEquals(next_batch.current_limit, 20) + self.assertEquals(next_batch.direction_is_forward, True) -- cgit 1.5.1 From f6a0a02a62db1a3030e5563d6454d01c4a76c38d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 17 Oct 2018 16:09:34 +0100 Subject: Fix bug where we raised StopIteration in a generator This made python 3.7 unhappy --- synapse/rest/media/v1/preview_url_resource.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index af01040a38..8c892ff187 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -596,10 +596,13 @@ def _iterate_over_text(tree, *tags_to_ignore): # to be returned. elements = iter([tree]) while True: - el = next(elements) + el = next(elements, None) + if el is None: + return + if isinstance(el, string_types): yield el - elif el is not None and el.tag not in tags_to_ignore: + elif el.tag not in tags_to_ignore: # el.text is the text before the first child, so we can immediately # return it if the text exists. 
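            # (el.tail, by contrast, is the text that follows the element's
            # closing tag; standard lxml semantics.)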
if el.text: -- cgit 1.5.1 From 1519572961d22aeaacbe9fccae7788f0f9e72b1c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 17 Oct 2018 15:44:34 +0100 Subject: Ship the email templates as package_data move the example email templates into the synapse package so that they can be used as package data, which should mean that all of the packaging mechanisms (pip, docker, debian, arch, etc) should now come with the example templates. In order to grandfather in people who relied on the templates being in the old place, check for that situation and fall back to using the defaults if the templates directory does not exist. --- MANIFEST.in | 2 +- changelog.d/4052.feature | 12 +++ docker/conf/homeserver.yaml | 4 +- res/templates/mail-Vector.css | 7 -- res/templates/mail.css | 156 ---------------------------------- res/templates/notif.html | 45 ---------- res/templates/notif.txt | 16 ---- res/templates/notif_mail.html | 55 ------------ res/templates/notif_mail.txt | 10 --- res/templates/room.html | 33 ------- res/templates/room.txt | 9 -- synapse/config/emailconfig.py | 33 ++++++- synapse/push/mailer.py | 5 +- synapse/res/templates/mail-Vector.css | 7 ++ synapse/res/templates/mail.css | 156 ++++++++++++++++++++++++++++++++++ synapse/res/templates/notif.html | 45 ++++++++++ synapse/res/templates/notif.txt | 16 ++++ synapse/res/templates/notif_mail.html | 55 ++++++++++++ synapse/res/templates/notif_mail.txt | 10 +++ synapse/res/templates/room.html | 33 +++++++ synapse/res/templates/room.txt | 9 ++ 21 files changed, 381 insertions(+), 337 deletions(-) create mode 100644 changelog.d/4052.feature delete mode 100644 res/templates/mail-Vector.css delete mode 100644 res/templates/mail.css delete mode 100644 res/templates/notif.html delete mode 100644 res/templates/notif.txt delete mode 100644 res/templates/notif_mail.html delete mode 100644 res/templates/notif_mail.txt delete mode 100644 res/templates/room.html delete mode 100644 res/templates/room.txt create mode 100644 synapse/res/templates/mail-Vector.css create mode 100644 synapse/res/templates/mail.css create mode 100644 synapse/res/templates/notif.html create mode 100644 synapse/res/templates/notif.txt create mode 100644 synapse/res/templates/notif_mail.html create mode 100644 synapse/res/templates/notif_mail.txt create mode 100644 synapse/res/templates/room.html create mode 100644 synapse/res/templates/room.txt (limited to 'synapse') diff --git a/MANIFEST.in b/MANIFEST.in index c6a37ac685..25cdf0a61b 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -12,12 +12,12 @@ recursive-include synapse/storage/schema *.sql recursive-include synapse/storage/schema *.py recursive-include docs * -recursive-include res * recursive-include scripts * recursive-include scripts-dev * recursive-include synapse *.pyi recursive-include tests *.py +recursive-include synapse/res * recursive-include synapse/static *.css recursive-include synapse/static *.gif recursive-include synapse/static *.html diff --git a/changelog.d/4052.feature b/changelog.d/4052.feature new file mode 100644 index 0000000000..0c01f06880 --- /dev/null +++ b/changelog.d/4052.feature @@ -0,0 +1,12 @@ +Ship the example email templates as part of the package + +**Note**: if you deploy your Synapse instance from a git checkout or a github +snapshot URL, then this means that the example email templates will no longer +be installed in `res/templates`. 
If you have email notifications enabled, you +should ensure that `email.template_dir` is either configured to point at a +directory where you have installed customised templates, or leave it unset to +use the default templates. + +The configuration parser will try to detect the situation where +`email.template_dir` is incorrectly set to `res/templates` and do the right +thing, but will warn about this. \ No newline at end of file diff --git a/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml index cfe88788f2..a38b929f50 100644 --- a/docker/conf/homeserver.yaml +++ b/docker/conf/homeserver.yaml @@ -211,7 +211,9 @@ email: require_transport_security: False notif_from: "{{ SYNAPSE_SMTP_FROM or "hostmaster@" + SYNAPSE_SERVER_NAME }}" app_name: Matrix - template_dir: res/templates + # if template_dir is unset, uses the example templates that are part of + # the Synapse distribution. + #template_dir: res/templates notif_template_html: notif_mail.html notif_template_text: notif_mail.txt notif_for_new_users: True diff --git a/res/templates/mail-Vector.css b/res/templates/mail-Vector.css deleted file mode 100644 index 6a3e36eda1..0000000000 --- a/res/templates/mail-Vector.css +++ /dev/null @@ -1,7 +0,0 @@ -.header { - border-bottom: 4px solid #e4f7ed ! important; -} - -.notif_link a, .footer a { - color: #76CFA6 ! important; -} diff --git a/res/templates/mail.css b/res/templates/mail.css deleted file mode 100644 index 5ab3e1b06d..0000000000 --- a/res/templates/mail.css +++ /dev/null @@ -1,156 +0,0 @@ -body { - margin: 0px; -} - -pre, code { - word-break: break-word; - white-space: pre-wrap; -} - -#page { - font-family: 'Open Sans', Helvetica, Arial, Sans-Serif; - font-color: #454545; - font-size: 12pt; - width: 100%; - padding: 20px; -} - -#inner { - width: 640px; -} - -.header { - width: 100%; - height: 87px; - color: #454545; - border-bottom: 4px solid #e5e5e5; -} - -.logo { - text-align: right; - margin-left: 20px; -} - -.salutation { - padding-top: 10px; - font-weight: bold; -} - -.summarytext { -} - -.room { - width: 100%; - color: #454545; - border-bottom: 1px solid #e5e5e5; -} - -.room_header td { - padding-top: 38px; - padding-bottom: 10px; - border-bottom: 1px solid #e5e5e5; -} - -.room_name { - vertical-align: middle; - font-size: 18px; - font-weight: bold; -} - -.room_header h2 { - margin-top: 0px; - margin-left: 75px; - font-size: 20px; -} - -.room_avatar { - width: 56px; - line-height: 0px; - text-align: center; - vertical-align: middle; -} - -.room_avatar img { - width: 48px; - height: 48px; - object-fit: cover; - border-radius: 24px; -} - -.notif { - border-bottom: 1px solid #e5e5e5; - margin-top: 16px; - padding-bottom: 16px; -} - -.historical_message .sender_avatar { - opacity: 0.3; -} - -/* spell out opacity and historical_message class names for Outlook aka Word */ -.historical_message .sender_name { - color: #e3e3e3; -} - -.historical_message .message_time { - color: #e3e3e3; -} - -.historical_message .message_body { - color: #c7c7c7; -} - -.historical_message td, -.message td { - padding-top: 10px; -} - -.sender_avatar { - width: 56px; - text-align: center; - vertical-align: top; -} - -.sender_avatar img { - margin-top: -2px; - width: 32px; - height: 32px; - border-radius: 16px; -} - -.sender_name { - display: inline; - font-size: 13px; - color: #a2a2a2; -} - -.message_time { - text-align: right; - width: 100px; - font-size: 11px; - color: #a2a2a2; -} - -.message_body { -} - -.notif_link td { - padding-top: 10px; - padding-bottom: 10px; - font-weight: bold; -} - 
-.notif_link a, .footer a { - color: #454545; - text-decoration: none; -} - -.debug { - font-size: 10px; - color: #888; -} - -.footer { - margin-top: 20px; - text-align: center; -} \ No newline at end of file diff --git a/res/templates/notif.html b/res/templates/notif.html deleted file mode 100644 index 88b921ca9c..0000000000 --- a/res/templates/notif.html +++ /dev/null @@ -1,45 +0,0 @@ -{% for message in notif.messages %} - - - {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} - {% if message.sender_avatar_url %} - - {% else %} - {% if message.sender_hash % 3 == 0 %} - - {% elif message.sender_hash % 3 == 1 %} - - {% else %} - - {% endif %} - {% endif %} - {% endif %} - - - {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} -
{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}
- {% endif %} -
- {% if message.msgtype == "m.text" %} - {{ message.body_text_html }} - {% elif message.msgtype == "m.emote" %} - {{ message.body_text_html }} - {% elif message.msgtype == "m.notice" %} - {{ message.body_text_html }} - {% elif message.msgtype == "m.image" %} - - {% elif message.msgtype == "m.file" %} - {{ message.body_text_plain }} - {% endif %} -
- - {{ message.ts|format_ts("%H:%M") }} - -{% endfor %} - - - - View {{ room.title }} - - - diff --git a/res/templates/notif.txt b/res/templates/notif.txt deleted file mode 100644 index a37bee9833..0000000000 --- a/res/templates/notif.txt +++ /dev/null @@ -1,16 +0,0 @@ -{% for message in notif.messages %} -{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }}) -{% if message.msgtype == "m.text" %} -{{ message.body_text_plain }} -{% elif message.msgtype == "m.emote" %} -{{ message.body_text_plain }} -{% elif message.msgtype == "m.notice" %} -{{ message.body_text_plain }} -{% elif message.msgtype == "m.image" %} -{{ message.body_text_plain }} -{% elif message.msgtype == "m.file" %} -{{ message.body_text_plain }} -{% endif %} -{% endfor %} - -View {{ room.title }} at {{ notif.link }} diff --git a/res/templates/notif_mail.html b/res/templates/notif_mail.html deleted file mode 100644 index fcdb3109fe..0000000000 --- a/res/templates/notif_mail.html +++ /dev/null @@ -1,55 +0,0 @@ - - - - - - - - - - - - -
- - - - - -
-
Hi {{ user_display_name }},
-
{{ summary_text }}
-
- {% for room in rooms %} - {% include 'room.html' with context %} - {% endfor %} - -
- - diff --git a/res/templates/notif_mail.txt b/res/templates/notif_mail.txt deleted file mode 100644 index 24843042a5..0000000000 --- a/res/templates/notif_mail.txt +++ /dev/null @@ -1,10 +0,0 @@ -Hi {{ user_display_name }}, - -{{ summary_text }} - -{% for room in rooms %} -{% include 'room.txt' with context %} -{% endfor %} - -You can disable these notifications at {{ unsubscribe_link }} - diff --git a/res/templates/room.html b/res/templates/room.html deleted file mode 100644 index 723c222d25..0000000000 --- a/res/templates/room.html +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - {% if room.invite %} - - - - - - {% else %} - {% for notif in room.notifs %} - {% include 'notif.html' with context %} - {% endfor %} - {% endif %} -
- {% if room.avatar_url %} - - {% else %} - {% if room.hash % 3 == 0 %} - - {% elif room.hash % 3 == 1 %} - - {% else %} - - {% endif %} - {% endif %} - - {{ room.title }} -
- Join the conversation. -
diff --git a/res/templates/room.txt b/res/templates/room.txt deleted file mode 100644 index 84648c710e..0000000000 --- a/res/templates/room.txt +++ /dev/null @@ -1,9 +0,0 @@ -{{ room.title }} - -{% if room.invite %} - You've been invited, join at {{ room.link }} -{% else %} - {% for notif in room.notifs %} - {% include 'notif.txt' with context %} - {% endfor %} -{% endif %} diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index fe156b6930..862f92c6ae 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -13,11 +13,19 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import print_function + # This file can't be called email.py because if it is, we cannot: import email.utils +import logging +import os +import sys +import textwrap from ._base import Config +logger = logging.getLogger(__name__) + class EmailConfig(Config): def read_config(self, config): @@ -38,7 +46,6 @@ class EmailConfig(Config): "smtp_host", "smtp_port", "notif_from", - "template_dir", "notif_template_html", "notif_template_text", ] @@ -62,9 +69,27 @@ class EmailConfig(Config): self.email_smtp_host = email_config["smtp_host"] self.email_smtp_port = email_config["smtp_port"] self.email_notif_from = email_config["notif_from"] - self.email_template_dir = email_config["template_dir"] self.email_notif_template_html = email_config["notif_template_html"] self.email_notif_template_text = email_config["notif_template_text"] + + self.email_template_dir = email_config.get("template_dir") + + # backwards-compatibility hack + if ( + self.email_template_dir == "res/templates" + and not os.path.isfile( + os.path.join(self.email_template_dir, self.email_notif_template_text) + ) + ): + t = """\ +WARNING: The email notifier is configured to look for templates in '%s', but no templates +could be found there. We will fall back to using the example templates; to get rid of this +warning, leave 'email.template_dir' unset. +""" % (self.email_template_dir,) + + print(textwrap.fill(t, width=80) + "\n", file=sys.stderr) + self.email_template_dir = None + self.email_notif_for_new_users = email_config.get( "notif_for_new_users", True ) @@ -113,7 +138,9 @@ class EmailConfig(Config): # require_transport_security: False # notif_from: "Your Friendly %(app)s Home Server " # app_name: Matrix - # template_dir: res/templates + # # if template_dir is unset, uses the example templates that are part of + # # the Synapse distribution. + # #template_dir: res/templates # notif_template_html: notif_mail.html # notif_template_text: notif_mail.txt # notif_for_new_users: True diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 1a5a10d974..b9dcfee740 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -528,7 +528,10 @@ def load_jinja2_templates(config): """ logger.info("loading jinja2") - loader = jinja2.FileSystemLoader(config.email_template_dir) + if config.email_template_dir: + loader = jinja2.FileSystemLoader(config.email_template_dir) + else: + loader = jinja2.PackageLoader('synapse', 'res/templates') env = jinja2.Environment(loader=loader) env.filters["format_ts"] = format_ts_filter env.filters["mxc_to_http"] = _create_mxc_to_http_filter(config) diff --git a/synapse/res/templates/mail-Vector.css b/synapse/res/templates/mail-Vector.css new file mode 100644 index 0000000000..6a3e36eda1 --- /dev/null +++ b/synapse/res/templates/mail-Vector.css @@ -0,0 +1,7 @@ +.header { + border-bottom: 4px solid #e4f7ed ! 
important; +} + +.notif_link a, .footer a { + color: #76CFA6 ! important; +} diff --git a/synapse/res/templates/mail.css b/synapse/res/templates/mail.css new file mode 100644 index 0000000000..5ab3e1b06d --- /dev/null +++ b/synapse/res/templates/mail.css @@ -0,0 +1,156 @@ +body { + margin: 0px; +} + +pre, code { + word-break: break-word; + white-space: pre-wrap; +} + +#page { + font-family: 'Open Sans', Helvetica, Arial, Sans-Serif; + font-color: #454545; + font-size: 12pt; + width: 100%; + padding: 20px; +} + +#inner { + width: 640px; +} + +.header { + width: 100%; + height: 87px; + color: #454545; + border-bottom: 4px solid #e5e5e5; +} + +.logo { + text-align: right; + margin-left: 20px; +} + +.salutation { + padding-top: 10px; + font-weight: bold; +} + +.summarytext { +} + +.room { + width: 100%; + color: #454545; + border-bottom: 1px solid #e5e5e5; +} + +.room_header td { + padding-top: 38px; + padding-bottom: 10px; + border-bottom: 1px solid #e5e5e5; +} + +.room_name { + vertical-align: middle; + font-size: 18px; + font-weight: bold; +} + +.room_header h2 { + margin-top: 0px; + margin-left: 75px; + font-size: 20px; +} + +.room_avatar { + width: 56px; + line-height: 0px; + text-align: center; + vertical-align: middle; +} + +.room_avatar img { + width: 48px; + height: 48px; + object-fit: cover; + border-radius: 24px; +} + +.notif { + border-bottom: 1px solid #e5e5e5; + margin-top: 16px; + padding-bottom: 16px; +} + +.historical_message .sender_avatar { + opacity: 0.3; +} + +/* spell out opacity and historical_message class names for Outlook aka Word */ +.historical_message .sender_name { + color: #e3e3e3; +} + +.historical_message .message_time { + color: #e3e3e3; +} + +.historical_message .message_body { + color: #c7c7c7; +} + +.historical_message td, +.message td { + padding-top: 10px; +} + +.sender_avatar { + width: 56px; + text-align: center; + vertical-align: top; +} + +.sender_avatar img { + margin-top: -2px; + width: 32px; + height: 32px; + border-radius: 16px; +} + +.sender_name { + display: inline; + font-size: 13px; + color: #a2a2a2; +} + +.message_time { + text-align: right; + width: 100px; + font-size: 11px; + color: #a2a2a2; +} + +.message_body { +} + +.notif_link td { + padding-top: 10px; + padding-bottom: 10px; + font-weight: bold; +} + +.notif_link a, .footer a { + color: #454545; + text-decoration: none; +} + +.debug { + font-size: 10px; + color: #888; +} + +.footer { + margin-top: 20px; + text-align: center; +} \ No newline at end of file diff --git a/synapse/res/templates/notif.html b/synapse/res/templates/notif.html new file mode 100644 index 0000000000..88b921ca9c --- /dev/null +++ b/synapse/res/templates/notif.html @@ -0,0 +1,45 @@ +{% for message in notif.messages %} + + + {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} + {% if message.sender_avatar_url %} + + {% else %} + {% if message.sender_hash % 3 == 0 %} + + {% elif message.sender_hash % 3 == 1 %} + + {% else %} + + {% endif %} + {% endif %} + {% endif %} + + + {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} +
{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}
+ {% endif %} +
+ {% if message.msgtype == "m.text" %} + {{ message.body_text_html }} + {% elif message.msgtype == "m.emote" %} + {{ message.body_text_html }} + {% elif message.msgtype == "m.notice" %} + {{ message.body_text_html }} + {% elif message.msgtype == "m.image" %} + + {% elif message.msgtype == "m.file" %} + {{ message.body_text_plain }} + {% endif %} +
+ + {{ message.ts|format_ts("%H:%M") }} + +{% endfor %} + + + + View {{ room.title }} + + + diff --git a/synapse/res/templates/notif.txt b/synapse/res/templates/notif.txt new file mode 100644 index 0000000000..a37bee9833 --- /dev/null +++ b/synapse/res/templates/notif.txt @@ -0,0 +1,16 @@ +{% for message in notif.messages %} +{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }}) +{% if message.msgtype == "m.text" %} +{{ message.body_text_plain }} +{% elif message.msgtype == "m.emote" %} +{{ message.body_text_plain }} +{% elif message.msgtype == "m.notice" %} +{{ message.body_text_plain }} +{% elif message.msgtype == "m.image" %} +{{ message.body_text_plain }} +{% elif message.msgtype == "m.file" %} +{{ message.body_text_plain }} +{% endif %} +{% endfor %} + +View {{ room.title }} at {{ notif.link }} diff --git a/synapse/res/templates/notif_mail.html b/synapse/res/templates/notif_mail.html new file mode 100644 index 0000000000..fcdb3109fe --- /dev/null +++ b/synapse/res/templates/notif_mail.html @@ -0,0 +1,55 @@ + + + + + + + + + + + + +
+ + + + + +
+
Hi {{ user_display_name }},
+
{{ summary_text }}
+
+ {% for room in rooms %} + {% include 'room.html' with context %} + {% endfor %} + +
+ + diff --git a/synapse/res/templates/notif_mail.txt b/synapse/res/templates/notif_mail.txt new file mode 100644 index 0000000000..24843042a5 --- /dev/null +++ b/synapse/res/templates/notif_mail.txt @@ -0,0 +1,10 @@ +Hi {{ user_display_name }}, + +{{ summary_text }} + +{% for room in rooms %} +{% include 'room.txt' with context %} +{% endfor %} + +You can disable these notifications at {{ unsubscribe_link }} + diff --git a/synapse/res/templates/room.html b/synapse/res/templates/room.html new file mode 100644 index 0000000000..723c222d25 --- /dev/null +++ b/synapse/res/templates/room.html @@ -0,0 +1,33 @@ + + + + + + {% if room.invite %} + + + + + + {% else %} + {% for notif in room.notifs %} + {% include 'notif.html' with context %} + {% endfor %} + {% endif %} +
+ {% if room.avatar_url %} + + {% else %} + {% if room.hash % 3 == 0 %} + + {% elif room.hash % 3 == 1 %} + + {% else %} + + {% endif %} + {% endif %} + + {{ room.title }} +
+ Join the conversation. +
diff --git a/synapse/res/templates/room.txt b/synapse/res/templates/room.txt new file mode 100644 index 0000000000..84648c710e --- /dev/null +++ b/synapse/res/templates/room.txt @@ -0,0 +1,9 @@ +{{ room.title }} + +{% if room.invite %} + You've been invited, join at {{ room.link }} +{% else %} + {% for notif in room.notifs %} + {% include 'notif.txt' with context %} + {% endfor %} +{% endif %} -- cgit 1.5.1 From c8f2c199910b70a5d94acf5597a4f3ba94cd3969 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 17 Oct 2018 16:56:22 +0100 Subject: Put the warning blob at the top of the file --- synapse/config/emailconfig.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'synapse') diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 862f92c6ae..e2582cfecc 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -26,6 +26,12 @@ from ._base import Config logger = logging.getLogger(__name__) +TEMPLATE_DIR_WARNING = """\ +WARNING: The email notifier is configured to look for templates in '%(template_dir)s', +but no templates could be found there. We will fall back to using the example templates; +to get rid of this warning, leave 'email.template_dir' unset. +""" + class EmailConfig(Config): def read_config(self, config): @@ -81,12 +87,9 @@ class EmailConfig(Config): os.path.join(self.email_template_dir, self.email_notif_template_text) ) ): - t = """\ -WARNING: The email notifier is configured to look for templates in '%s', but no templates -could be found there. We will fall back to using the example templates; to get rid of this -warning, leave 'email.template_dir' unset. -""" % (self.email_template_dir,) - + t = TEMPLATE_DIR_WARNING % { + "template_dir": self.email_template_dir, + } print(textwrap.fill(t, width=80) + "\n", file=sys.stderr) self.email_template_dir = None -- cgit 1.5.1 From 0fd2321629f4a06bec8b552299d3ced32a7a021c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 16 Oct 2018 20:37:16 +0100 Subject: Fix incorrect truncation in get_missing_events It's quite important that get_missing_events returns the *latest* events in the room; however we were pulling event ids out of the database until we got *at least* 10, and then taking the *earliest* of the results. We also shouldn't really be relying on depth, and should be checking the room_id. 
--- changelog.d/4045.bugfix | 1 + synapse/federation/federation_server.py | 8 +++---- synapse/federation/transport/server.py | 2 -- synapse/handlers/federation.py | 12 +++++------ synapse/storage/event_federation.py | 38 ++++++++++++++------------------- 5 files changed, 26 insertions(+), 35 deletions(-) create mode 100644 changelog.d/4045.bugfix (limited to 'synapse') diff --git a/changelog.d/4045.bugfix b/changelog.d/4045.bugfix new file mode 100644 index 0000000000..fa50eb5aff --- /dev/null +++ b/changelog.d/4045.bugfix @@ -0,0 +1 @@ +Fix bug which made get_missing_events return too few events \ No newline at end of file diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 819e8f7331..4efe95faa4 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -507,19 +507,19 @@ class FederationServer(FederationBase): @defer.inlineCallbacks @log_function def on_get_missing_events(self, origin, room_id, earliest_events, - latest_events, limit, min_depth): + latest_events, limit): with (yield self._server_linearizer.queue((origin, room_id))): origin_host, _ = parse_server_name(origin) yield self.check_server_matches_acl(origin_host, room_id) logger.info( "on_get_missing_events: earliest_events: %r, latest_events: %r," - " limit: %d, min_depth: %d", - earliest_events, latest_events, limit, min_depth + " limit: %d", + earliest_events, latest_events, limit, ) missing_events = yield self.handler.on_get_missing_events( - origin, room_id, earliest_events, latest_events, limit, min_depth + origin, room_id, earliest_events, latest_events, limit, ) if len(missing_events) < 5: diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 2f874b4838..7288d49074 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -560,7 +560,6 @@ class FederationGetMissingEventsServlet(BaseFederationServlet): @defer.inlineCallbacks def on_POST(self, origin, content, query, room_id): limit = int(content.get("limit", 10)) - min_depth = int(content.get("min_depth", 0)) earliest_events = content.get("earliest_events", []) latest_events = content.get("latest_events", []) @@ -569,7 +568,6 @@ class FederationGetMissingEventsServlet(BaseFederationServlet): room_id=room_id, earliest_events=earliest_events, latest_events=latest_events, - min_depth=min_depth, limit=limit, ) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 45d955e6f5..cab57a8849 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -309,8 +309,8 @@ class FederationHandler(BaseHandler): if sent_to_us_directly: logger.warn( - "[%s %s] Failed to fetch %d prev events: rejecting", - room_id, event_id, len(prevs - seen), + "[%s %s] Rejecting: failed to fetch %d prev events: %s", + room_id, event_id, len(prevs - seen), shortstr(prevs - seen) ) raise FederationError( "ERROR", @@ -452,8 +452,8 @@ class FederationHandler(BaseHandler): latest |= seen logger.info( - "[%s %s]: Requesting %d prev_events: %s", - room_id, event_id, len(prevs - seen), shortstr(prevs - seen) + "[%s %s]: Requesting missing events between %s and %s", + room_id, event_id, shortstr(latest), event_id, ) # XXX: we set timeout to 10s to help workaround @@ -1852,7 +1852,7 @@ class FederationHandler(BaseHandler): @defer.inlineCallbacks def on_get_missing_events(self, origin, room_id, earliest_events, - latest_events, limit, min_depth): + latest_events, limit): in_room = yield 
self.auth.check_host_in_room( room_id, origin @@ -1861,14 +1861,12 @@ class FederationHandler(BaseHandler): raise AuthError(403, "Host not in room.") limit = min(limit, 20) - min_depth = max(min_depth, 0) missing_events = yield self.store.get_missing_events( room_id=room_id, earliest_events=earliest_events, latest_events=latest_events, limit=limit, - min_depth=min_depth, ) missing_events = yield filter_events_for_server( diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index 24345b20a6..3faca2a042 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -376,33 +376,25 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, @defer.inlineCallbacks def get_missing_events(self, room_id, earliest_events, latest_events, - limit, min_depth): + limit): ids = yield self.runInteraction( "get_missing_events", self._get_missing_events, - room_id, earliest_events, latest_events, limit, min_depth + room_id, earliest_events, latest_events, limit, ) - events = yield self._get_events(ids) - - events = sorted( - [ev for ev in events if ev.depth >= min_depth], - key=lambda e: e.depth, - ) - - defer.returnValue(events[:limit]) + defer.returnValue(events) def _get_missing_events(self, txn, room_id, earliest_events, latest_events, - limit, min_depth): - - earliest_events = set(earliest_events) - front = set(latest_events) - earliest_events + limit): - event_results = set() + seen_events = set(earliest_events) + front = set(latest_events) - seen_events + event_results = [] query = ( "SELECT prev_event_id FROM event_edges " - "WHERE event_id = ? AND is_state = ? " + "WHERE room_id = ? AND event_id = ? AND is_state = ? " "LIMIT ?" ) @@ -411,18 +403,20 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, for event_id in front: txn.execute( query, - (event_id, False, limit - len(event_results)) + (room_id, event_id, False, limit - len(event_results)) ) - for e_id, in txn: - new_front.add(e_id) + new_results = set(t[0] for t in txn) - seen_events - new_front -= earliest_events - new_front -= event_results + new_front |= new_results + seen_events |= new_results + event_results.extend(new_results) front = new_front - event_results |= new_front + # we built the list working backwards from latest_events; we now need to + # reverse it so that the events are approximately chronological. 
+ event_results.reverse() return event_results -- cgit 1.5.1 From c7d0f34a3c0217be4ca06d934b66fcdfc22a7219 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 17 Oct 2018 17:40:19 +0100 Subject: v0.33.7rc2 --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/__init__.py b/synapse/__init__.py index ea07fda14a..a0c4ef6488 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.33.7rc1" +__version__ = "0.33.7rc2" -- cgit 1.5.1 From 926da4dda84178b83fa00813d31f1bceef25171f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 18 Oct 2018 14:57:32 +0100 Subject: 0.33.7 --- CHANGES.md | 7 +++++-- synapse/__init__.py | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/CHANGES.md b/CHANGES.md index 7de66d6ab4..5f598559a0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,5 @@ -Synapse 0.33.7rc2 (2018-10-17) -============================== +Synapse 0.33.7 (2018-10-18) +=========================== **Warning**: This release removes the example email notification templates from `res/templates` (they are now internal to the python package). This should only @@ -15,6 +15,9 @@ The configuration parser will try to detect the situation where `email.template_dir` is incorrectly set to `res/templates` and do the right thing, but will warn about this. +Synapse 0.33.7rc2 (2018-10-17) +============================== + Features -------- diff --git a/synapse/__init__.py b/synapse/__init__.py index a0c4ef6488..1ddbbbebfb 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.33.7rc2" +__version__ = "0.33.7" -- cgit 1.5.1 From a36b0ec195c4fe88730d656c8668a5dda1a6f1a7 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 19 Oct 2018 09:24:00 +1100 Subject: make a bytestring --- synapse/util/manhole.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/util/manhole.py b/synapse/util/manhole.py index 8d0f2a8918..cf43ab6a19 100644 --- a/synapse/util/manhole.py +++ b/synapse/util/manhole.py @@ -82,7 +82,7 @@ def manhole(username, password, globals): ) factory = manhole_ssh.ConchFactory(portal.Portal(rlm, [checker])) - factory.publicKeys['ssh-rsa'] = Key.fromString(PUBLIC_KEY) - factory.privateKeys['ssh-rsa'] = Key.fromString(PRIVATE_KEY) + factory.publicKeys[b'ssh-rsa'] = Key.fromString(PUBLIC_KEY) + factory.privateKeys[b'ssh-rsa'] = Key.fromString(PRIVATE_KEY) return factory -- cgit 1.5.1 From 74e761708398d5170783912f02d120f20113205e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Oct 2018 16:14:24 +0100 Subject: Clean up room alias creation --- synapse/handlers/directory.py | 77 ++++++++++++++++++++++--------------- synapse/handlers/room.py | 5 ++- synapse/rest/client/v1/directory.py | 37 +++--------------- 3 files changed, 55 insertions(+), 64 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 18741c5fac..02f12f6645 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -80,42 +80,60 @@ class DirectoryHandler(BaseHandler): ) @defer.inlineCallbacks - def create_association(self, user_id, room_alias, room_id, servers=None): - # association creation for human users - # TODO(erikj): Do user auth. 
+    def create_association(self, requester, room_alias, room_id, servers=None,
+                           send_event=True):
+        """Attempt to create a new alias

-        if not self.spam_checker.user_may_create_room_alias(user_id, room_alias):
-            raise SynapseError(
-                403, "This user is not permitted to create this alias",
-            )
+        Args:
+            requester (Requester)
+            room_alias (RoomAlias)
+            room_id (str)
+            servers (list[str]|None): List of servers that other servers
+                should try and join via
+            send_event (bool): Whether to send an updated m.room.aliases event

-        can_create = yield self.can_modify_alias(
-            room_alias,
-            user_id=user_id
-        )
-        if not can_create:
-            raise SynapseError(
-                400, "This alias is reserved by an application service.",
-                errcode=Codes.EXCLUSIVE
-            )
-        yield self._create_association(room_alias, room_id, servers, creator=user_id)
+        Returns:
+            Deferred
+        """

-    @defer.inlineCallbacks
-    def create_appservice_association(self, service, room_alias, room_id,
-                                      servers=None):
-        if not service.is_interested_in_alias(room_alias.to_string()):
-            raise SynapseError(
-                400, "This application service has not reserved"
-                " this kind of alias.", errcode=Codes.EXCLUSIVE
+        user_id = requester.user.to_string()
+
+        service = requester.app_service
+        if service:
+            if not service.is_interested_in_alias(room_alias.to_string()):
+                raise SynapseError(
+                    400, "This application service has not reserved"
+                    " this kind of alias.", errcode=Codes.EXCLUSIVE
+                )
+        else:
+            if not self.spam_checker.user_may_create_room_alias(user_id, room_alias):
+                raise AuthError(
+                    403, "This user is not permitted to create this alias",
+                )
+
+            can_create = yield self.can_modify_alias(
+                room_alias,
+                user_id=user_id
             )
+            if not can_create:
+                raise AuthError(
+                    400, "This alias is reserved by an application service.",
+                    errcode=Codes.EXCLUSIVE
+                )

-        # association creation for app services
-        yield self._create_association(room_alias, room_id, servers)
+        yield self._create_association(room_alias, room_id, servers, creator=user_id)
+
+        if send_event:
+            yield self.send_room_alias_update_event(
+                requester,
+                room_id
+            )

     @defer.inlineCallbacks
-    def delete_association(self, requester, user_id, room_alias):
+    def delete_association(self, requester, room_alias):
         # association deletion for human users

+        user_id = requester.user.to_string()
+
         try:
             can_delete = yield self._user_can_delete_alias(room_alias, user_id)
         except StoreError as e:
@@ -143,7 +161,6 @@ class DirectoryHandler(BaseHandler):
         try:
             yield self.send_room_alias_update_event(
                 requester,
-                requester.user.to_string(),
                 room_id
             )
@@ -261,7 +278,7 @@ class DirectoryHandler(BaseHandler):
         )

     @defer.inlineCallbacks
-    def send_room_alias_update_event(self, requester, user_id, room_id):
+    def send_room_alias_update_event(self, requester, room_id):
         aliases = yield self.store.get_aliases_for_room(room_id)

         yield self.event_creation_handler.create_and_send_nonmember_event(
@@ -270,7 +287,7 @@ class DirectoryHandler(BaseHandler):
                 "type": EventTypes.Aliases,
                 "state_key": self.hs.hostname,
                 "room_id": room_id,
-                "sender": user_id,
+                "sender": requester.user.to_string(),
                 "content": {"aliases": aliases},
             },
             ratelimit=False
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index c3f820b975..ab1571b27b 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -190,10 +190,11 @@ class RoomCreationHandler(BaseHandler):
         if room_alias:
             directory_handler = self.hs.get_handlers().directory_handler
             yield directory_handler.create_association(
-                user_id=user_id,
+                requester=requester,
                 room_id=room_id,
room_alias=room_alias, servers=[self.hs.hostname], + send_event=False, ) preset_config = config.get( @@ -289,7 +290,7 @@ class RoomCreationHandler(BaseHandler): if room_alias: result["room_alias"] = room_alias.to_string() yield directory_handler.send_room_alias_update_event( - requester, user_id, room_id + requester, room_id ) defer.returnValue(result) diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index 97733f3026..0220acf644 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -74,38 +74,11 @@ class ClientDirectoryServer(ClientV1RestServlet): if room is None: raise SynapseError(400, "Room does not exist") - dir_handler = self.handlers.directory_handler + requester = yield self.auth.get_user_by_req(request) - try: - # try to auth as a user - requester = yield self.auth.get_user_by_req(request) - try: - user_id = requester.user.to_string() - yield dir_handler.create_association( - user_id, room_alias, room_id, servers - ) - yield dir_handler.send_room_alias_update_event( - requester, - user_id, - room_id - ) - except SynapseError as e: - raise e - except Exception: - logger.exception("Failed to create association") - raise - except AuthError: - # try to auth as an application service - service = yield self.auth.get_appservice_by_req(request) - yield dir_handler.create_appservice_association( - service, room_alias, room_id, servers - ) - logger.info( - "Application service at %s created alias %s pointing to %s", - service.url, - room_alias.to_string(), - room_id - ) + yield self.handlers.directory_handler.create_association( + requester, room_alias, room_id, servers + ) defer.returnValue((200, {})) @@ -135,7 +108,7 @@ class ClientDirectoryServer(ClientV1RestServlet): room_alias = RoomAlias.from_string(room_alias) yield dir_handler.delete_association( - requester, user.to_string(), room_alias + requester, room_alias ) logger.info( -- cgit 1.5.1 From 084046456ec88588779a62f9378c1a8e911bfc7c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 17 Oct 2018 16:14:04 +0100 Subject: Add config option to control alias creation --- synapse/config/homeserver.py | 3 +- synapse/config/room_directory.py | 101 ++++++++++++++++++++++++++++++++ synapse/federation/federation_server.py | 16 +---- synapse/handlers/directory.py | 9 +++ synapse/util/__init__.py | 21 +++++++ 5 files changed, 135 insertions(+), 15 deletions(-) create mode 100644 synapse/config/room_directory.py (limited to 'synapse') diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index b8d5690f2b..10dd40159f 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -31,6 +31,7 @@ from .push import PushConfig from .ratelimiting import RatelimitConfig from .registration import RegistrationConfig from .repository import ContentRepositoryConfig +from .room_directory import RoomDirectoryConfig from .saml2 import SAML2Config from .server import ServerConfig from .server_notices_config import ServerNoticesConfig @@ -49,7 +50,7 @@ class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig, WorkerConfig, PasswordAuthProviderConfig, PushConfig, SpamCheckerConfig, GroupsConfig, UserDirectoryConfig, ConsentConfig, - ServerNoticesConfig, + ServerNoticesConfig, RoomDirectoryConfig, ): pass diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py new file mode 100644 index 0000000000..41ef3217e8 --- /dev/null +++ b/synapse/config/room_directory.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- 
+# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.util import glob_to_regex + +from ._base import Config, ConfigError + + +class RoomDirectoryConfig(Config): + def read_config(self, config): + alias_creation_rules = config["alias_creation_rules"] + + self._alias_creation_rules = [ + _AliasRule(rule) + for rule in alias_creation_rules + ] + + def default_config(self, config_dir_path, server_name, **kwargs): + return """ + # The `alias_creation` option controls who's allowed to create aliases + # on this server. + # + # The format of this option is a list of rules that contain globs that + # match against user_id and the new alias (fully qualified with server + # name). The action in the first rule that matches is taken, which can + # currently either be "allowed" or "denied". + # + # If no rules match the request is denied. + alias_creation_rules: + - user_id: "*" + alias: "*" + action: allowed + """ + + def is_alias_creation_allowed(self, user_id, alias): + """Checks if the given user is allowed to create the given alias + + Args: + user_id (str) + alias (str) + + Returns: + boolean: True if user is allowed to crate the alias + """ + for rule in self._alias_creation_rules: + if rule.matches(user_id, alias): + return rule.action == "allowed" + + return False + + +class _AliasRule(object): + def __init__(self, rule): + action = rule["action"] + user_id = rule["user_id"] + alias = rule["alias"] + + if action in ("allowed", "denied"): + self.action = action + else: + raise ConfigError( + "alias_creation_rules rules can only have action of 'allowed'" + " or 'denied'" + ) + + try: + self._user_id_regex = glob_to_regex(user_id) + self._alias_regex = glob_to_regex(alias) + except Exception as e: + raise ConfigError("Failed to parse glob into regex: %s", e) + + def matches(self, user_id, alias): + """Tests if this rule matches the given user_id and alias. + + Args: + user_id (str) + alias (str) + + Returns: + boolean + """ + + if not self._user_id_regex.search(user_id): + return False + + if not self._alias_regex.search(alias): + return False + + return True diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 4efe95faa4..d041c26824 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
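
The `alias_creation_rules` option above is evaluated first-match-wins: a rule applies only if both its user_id and alias globs match, the matching rule's action decides the outcome, and no match means the request is denied. A minimal standalone sketch of those semantics, using the stdlib fnmatch in place of synapse's glob_to_regex (the rule dicts only mirror the config shape; none of this is synapse API):

from fnmatch import fnmatchcase

def is_alias_creation_allowed(rules, user_id, alias):
    # First rule whose globs match both fields wins; the default is deny.
    for rule in rules:
        if fnmatchcase(user_id, rule["user_id"]) and fnmatchcase(alias, rule["alias"]):
            return rule["action"] == "allowed"
    return False

rules = [
    {"user_id": "@admin:example.com", "alias": "*", "action": "allowed"},
    {"user_id": "*", "alias": "#official-*:example.com", "action": "denied"},
    {"user_id": "*", "alias": "*", "action": "allowed"},
]

assert is_alias_creation_allowed(rules, "@admin:example.com", "#official-news:example.com")
assert not is_alias_creation_allowed(rules, "@bob:example.com", "#official-news:example.com")
assert is_alias_creation_allowed(rules, "@bob:example.com", "#random:example.com")
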
import logging -import re import six from six import iteritems @@ -44,6 +43,7 @@ from synapse.replication.http.federation import ( ReplicationGetQueryRestServlet, ) from synapse.types import get_domain_from_id +from synapse.util import glob_to_regex from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.caches.response_cache import ResponseCache from synapse.util.logcontext import nested_logging_context @@ -729,22 +729,10 @@ def _acl_entry_matches(server_name, acl_entry): if not isinstance(acl_entry, six.string_types): logger.warn("Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry)) return False - regex = _glob_to_regex(acl_entry) + regex = glob_to_regex(acl_entry) return regex.match(server_name) -def _glob_to_regex(glob): - res = '' - for c in glob: - if c == '*': - res = res + '.*' - elif c == '?': - res = res + '.' - else: - res = res + re.escape(c) - return re.compile(res + "\\Z", re.IGNORECASE) - - class FederationHandlerRegistry(object): """Allows classes to register themselves as handlers for a given EDU or query type for incoming federation traffic. diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 02f12f6645..7d67bf803a 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -43,6 +43,7 @@ class DirectoryHandler(BaseHandler): self.state = hs.get_state_handler() self.appservice_handler = hs.get_application_service_handler() self.event_creation_handler = hs.get_event_creation_handler() + self.config = hs.config self.federation = hs.get_federation_client() hs.get_federation_registry().register_query_handler( @@ -111,6 +112,14 @@ class DirectoryHandler(BaseHandler): 403, "This user is not permitted to create this alias", ) + if not self.config.is_alias_creation_allowed(user_id, room_alias.to_string()): + # Lets just return a generic message, as there may be all sorts of + # reasons why we said no. TODO: Allow configurable error messages + # per alias creation rule? + raise SynapseError( + 403, "Not allowed to create alias", + ) + can_create = yield self.can_modify_alias( room_alias, user_id=user_id diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 9a8fae0497..163e4b35ff 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -14,6 +14,7 @@ # limitations under the License. import logging +import re from itertools import islice import attr @@ -138,3 +139,23 @@ def log_failure(failure, msg, consumeErrors=True): if not consumeErrors: return failure + + +def glob_to_regex(glob): + """Converts a glob to a compiled regex object + + Args: + glob (str) + + Returns: + re.RegexObject + """ + res = '' + for c in glob: + if c == '*': + res = res + '.*' + elif c == '?': + res = res + '.' + else: + res = res + re.escape(c) + return re.compile(res + "\\Z", re.IGNORECASE) -- cgit 1.5.1 From 9fafdfa97d87006177d13d4b80aeebfc4ded4bee Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Oct 2018 14:21:09 +0100 Subject: Anchor returned regex to start and end of string --- synapse/util/__init__.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 163e4b35ff..0ae7e2ef3b 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -142,7 +142,9 @@ def log_failure(failure, msg, consumeErrors=True): def glob_to_regex(glob): - """Converts a glob to a compiled regex object + """Converts a glob to a compiled regex object. 
+ + The regex is anchored at the beginning and end of the string. Args: glob (str) @@ -158,4 +160,6 @@ def glob_to_regex(glob): res = res + '.' else: res = res + re.escape(c) - return re.compile(res + "\\Z", re.IGNORECASE) + + # \A anchors at start of string, \Z at end of string + return re.compile(r"\A" + res + r"\Z", re.IGNORECASE) -- cgit 1.5.1 From b69216f7680b92ccd8f92e618e1a81c9ba3c3346 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 19 Oct 2018 21:45:45 +1100 Subject: Make the metrics less racy (#4061) --- changelog.d/4061.bugfix | 1 + synapse/http/request_metrics.py | 31 ++++++++++++++++++------------- synapse/notifier.py | 6 +++--- 3 files changed, 22 insertions(+), 16 deletions(-) create mode 100644 changelog.d/4061.bugfix (limited to 'synapse') diff --git a/changelog.d/4061.bugfix b/changelog.d/4061.bugfix new file mode 100644 index 0000000000..94ffcf7a51 --- /dev/null +++ b/changelog.d/4061.bugfix @@ -0,0 +1 @@ +Fix some metrics being racy and causing exceptions when polled by Prometheus. diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index fedb4e6b18..62045a918b 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -39,7 +39,8 @@ outgoing_responses_counter = Counter( ) response_timer = Histogram( - "synapse_http_server_response_time_seconds", "sec", + "synapse_http_server_response_time_seconds", + "sec", ["method", "servlet", "tag", "code"], ) @@ -79,15 +80,11 @@ response_size = Counter( # than when the response was written. in_flight_requests_ru_utime = Counter( - "synapse_http_server_in_flight_requests_ru_utime_seconds", - "", - ["method", "servlet"], + "synapse_http_server_in_flight_requests_ru_utime_seconds", "", ["method", "servlet"] ) in_flight_requests_ru_stime = Counter( - "synapse_http_server_in_flight_requests_ru_stime_seconds", - "", - ["method", "servlet"], + "synapse_http_server_in_flight_requests_ru_stime_seconds", "", ["method", "servlet"] ) in_flight_requests_db_txn_count = Counter( @@ -134,7 +131,7 @@ def _get_in_flight_counts(): # type counts = {} for rm in reqs: - key = (rm.method, rm.name,) + key = (rm.method, rm.name) counts[key] = counts.get(key, 0) + 1 return counts @@ -175,7 +172,8 @@ class RequestMetrics(object): if context != self.start_context: logger.warn( "Context have unexpectedly changed %r, %r", - context, self.start_context + context, + self.start_context, ) return @@ -192,10 +190,10 @@ class RequestMetrics(object): resource_usage = context.get_resource_usage() response_ru_utime.labels(self.method, self.name, tag).inc( - resource_usage.ru_utime, + resource_usage.ru_utime ) response_ru_stime.labels(self.method, self.name, tag).inc( - resource_usage.ru_stime, + resource_usage.ru_stime ) response_db_txn_count.labels(self.method, self.name, tag).inc( resource_usage.db_txn_count @@ -222,8 +220,15 @@ class RequestMetrics(object): diff = new_stats - self._request_stats self._request_stats = new_stats - in_flight_requests_ru_utime.labels(self.method, self.name).inc(diff.ru_utime) - in_flight_requests_ru_stime.labels(self.method, self.name).inc(diff.ru_stime) + # max() is used since rapid use of ru_stime/ru_utime can end up with the + # count going backwards due to NTP, time smearing, fine-grained + # correction, or floating points. Who knows, really? 
+ in_flight_requests_ru_utime.labels(self.method, self.name).inc( + max(diff.ru_utime, 0) + ) + in_flight_requests_ru_stime.labels(self.method, self.name).inc( + max(diff.ru_stime, 0) + ) in_flight_requests_db_txn_count.labels(self.method, self.name).inc( diff.db_txn_count diff --git a/synapse/notifier.py b/synapse/notifier.py index 340b16ce25..de02b1017e 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -186,9 +186,9 @@ class Notifier(object): def count_listeners(): all_user_streams = set() - for x in self.room_to_user_streams.values(): + for x in list(self.room_to_user_streams.values()): all_user_streams |= x - for x in self.user_to_user_stream.values(): + for x in list(self.user_to_user_stream.values()): all_user_streams.add(x) return sum(stream.count_listeners() for stream in all_user_streams) @@ -196,7 +196,7 @@ class Notifier(object): LaterGauge( "synapse_notifier_rooms", "", [], - lambda: count(bool, self.room_to_user_streams.values()), + lambda: count(bool, list(self.room_to_user_streams.values())), ) LaterGauge( "synapse_notifier_users", "", [], -- cgit 1.5.1 From e404ba9aacb426efc7d815cadfabd54dcf576e3b Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 19 Oct 2018 22:26:00 +1100 Subject: Fix manhole on py3 (pt 2) (#4067) --- changelog.d/4067.bugfix | 1 + synapse/util/manhole.py | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/4067.bugfix (limited to 'synapse') diff --git a/changelog.d/4067.bugfix b/changelog.d/4067.bugfix new file mode 100644 index 0000000000..78d69a8819 --- /dev/null +++ b/changelog.d/4067.bugfix @@ -0,0 +1 @@ +Manhole now works again on Python 3, instead of failing with a "couldn't match all kex parts" when connecting. diff --git a/synapse/util/manhole.py b/synapse/util/manhole.py index cf43ab6a19..9cb7e9c9ab 100644 --- a/synapse/util/manhole.py +++ b/synapse/util/manhole.py @@ -70,6 +70,8 @@ def manhole(username, password, globals): Returns: twisted.internet.protocol.Factory: A factory to pass to ``listenTCP`` """ + if not isinstance(password, bytes): + password = password.encode('ascii') checker = checkers.InMemoryUsernamePasswordDatabaseDontUse( **{username: password} -- cgit 1.5.1 From cc325c7069599ac0f9b8c31aa3a472d5ea041a4f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 19 Oct 2018 14:01:59 +0100 Subject: Calculate absolute path for email templates --- changelog.d/4068.bugfix | 1 + synapse/config/emailconfig.py | 40 ++++++++++++++++++---------------------- synapse/push/mailer.py | 8 ++------ 3 files changed, 21 insertions(+), 28 deletions(-) create mode 100644 changelog.d/4068.bugfix (limited to 'synapse') diff --git a/changelog.d/4068.bugfix b/changelog.d/4068.bugfix new file mode 100644 index 0000000000..74bda7491f --- /dev/null +++ b/changelog.d/4068.bugfix @@ -0,0 +1 @@ +Fix bug which prevented email notifications from being sent unless an absolute path was given for `email_templates`. 
\ No newline at end of file diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index e2582cfecc..93d70cff14 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -19,18 +19,12 @@ from __future__ import print_function import email.utils import logging import os -import sys -import textwrap -from ._base import Config +import pkg_resources -logger = logging.getLogger(__name__) +from ._base import Config, ConfigError -TEMPLATE_DIR_WARNING = """\ -WARNING: The email notifier is configured to look for templates in '%(template_dir)s', -but no templates could be found there. We will fall back to using the example templates; -to get rid of this warning, leave 'email.template_dir' unset. -""" +logger = logging.getLogger(__name__) class EmailConfig(Config): @@ -78,20 +72,22 @@ class EmailConfig(Config): self.email_notif_template_html = email_config["notif_template_html"] self.email_notif_template_text = email_config["notif_template_text"] - self.email_template_dir = email_config.get("template_dir") - - # backwards-compatibility hack - if ( - self.email_template_dir == "res/templates" - and not os.path.isfile( - os.path.join(self.email_template_dir, self.email_notif_template_text) + template_dir = email_config.get("template_dir") + # we need an absolute path, because we change directory after starting (and + # we don't yet know what auxilliary templates like mail.css we will need). + # (Note that loading as package_resources with jinja.PackageLoader doesn't + # work for the same reason.) + if not template_dir: + template_dir = pkg_resources.resource_filename( + 'synapse', 'res/templates' ) - ): - t = TEMPLATE_DIR_WARNING % { - "template_dir": self.email_template_dir, - } - print(textwrap.fill(t, width=80) + "\n", file=sys.stderr) - self.email_template_dir = None + template_dir = os.path.abspath(template_dir) + + for f in self.email_notif_template_text, self.email_notif_template_html: + p = os.path.join(template_dir, f) + if not os.path.isfile(p): + raise ConfigError("Unable to find email template file %s" % (p, )) + self.email_template_dir = template_dir self.email_notif_for_new_users = email_config.get( "notif_for_new_users", True diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index b9dcfee740..16fb5e8471 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -526,12 +526,8 @@ def load_jinja2_templates(config): Returns: (notif_template_html, notif_template_text) """ - logger.info("loading jinja2") - - if config.email_template_dir: - loader = jinja2.FileSystemLoader(config.email_template_dir) - else: - loader = jinja2.PackageLoader('synapse', 'res/templates') + logger.info("loading email templates from '%s'", config.email_template_dir) + loader = jinja2.FileSystemLoader(config.email_template_dir) env = jinja2.Environment(loader=loader) env.filters["format_ts"] = format_ts_filter env.filters["mxc_to_http"] = _create_mxc_to_http_filter(config) -- cgit 1.5.1 From e5b52d0f945962a5f4bef5e7a2fb6445dfc0d8f8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 19 Oct 2018 21:49:26 +0100 Subject: Make psutil an explicit dependency As of #4027, we require psutil to be installed, so it should be in our dependency list. We can also remove some of the conditional import code introduced by #992. Fixes #4062. 
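
A minimal sketch of the psutil calls the stats reporter depends on (psutil >= 2.0 assumed; illustrative, not synapse code). The first cpu_percent(interval=None) call only establishes a baseline, which is why the diff below primes it once at startup:

import psutil

process = psutil.Process()  # the current process
process.cpu_percent(interval=None)  # prime it: the very first reading is meaningless
# ... later, each report measures CPU usage since the previous call:
rss_bytes = process.memory_info().rss
print("rss=%d bytes cpu=%.1f%%" % (rss_bytes, process.cpu_percent(interval=None)))
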
--- changelog.d/4073.misc | 1 + synapse/app/homeserver.py | 11 ++++------- synapse/python_dependencies.py | 4 +--- 3 files changed, 6 insertions(+), 10 deletions(-) create mode 100644 changelog.d/4073.misc (limited to 'synapse') diff --git a/changelog.d/4073.misc b/changelog.d/4073.misc new file mode 100644 index 0000000000..fc304bef06 --- /dev/null +++ b/changelog.d/4073.misc @@ -0,0 +1 @@ +Add psutil as an explicit dependency diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index e3f0d99a3f..a0c0fc78ec 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -20,6 +20,7 @@ import sys from six import iteritems +import psutil from prometheus_client import Gauge from twisted.application import service @@ -502,7 +503,6 @@ def run(hs): def performance_stats_init(): try: - import psutil process = psutil.Process() # Ensure we can fetch both, and make the initial request for cpu_percent # so the next request will use this as the initial point. @@ -510,12 +510,9 @@ def run(hs): process.cpu_percent(interval=None) logger.info("report_stats can use psutil") stats_process.append(process) - except (ImportError, AttributeError): - logger.warn( - "report_stats enabled but psutil is not installed or incorrect version." - " Disabling reporting of memory/cpu stats." - " Ensuring psutil is available will help matrix.org track performance" - " changes across releases." + except (AttributeError): + logger.warning( + "Unable to read memory/cpu stats. Disabling reporting." ) def generate_user_daily_visit_stats(): diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index f51184b50d..943876456b 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -53,6 +53,7 @@ REQUIREMENTS = { "pillow>=3.1.2": ["PIL"], "pydenticon>=0.2": ["pydenticon"], "sortedcontainers>=1.4.4": ["sortedcontainers"], + "psutil>=2.0.0": ["psutil>=2.0.0"], "pysaml2>=3.0.0": ["saml2"], "pymacaroons-pynacl>=0.9.3": ["pymacaroons"], "msgpack-python>=0.4.2": ["msgpack"], @@ -79,9 +80,6 @@ CONDITIONAL_REQUIREMENTS = { "matrix-synapse-ldap3": { "matrix-synapse-ldap3>=0.1": ["ldap_auth_provider"], }, - "psutil": { - "psutil>=2.0.0": ["psutil>=2.0.0"], - }, "postgres": { "psycopg2>=2.6": ["psycopg2"] } -- cgit 1.5.1 From e1728dfcbe585edfb590bce50adeaab341a70db8 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Sat, 20 Oct 2018 11:16:55 +1100 Subject: Make scripts/ and scripts-dev/ pass pyflakes (and the rest of the codebase on py3) (#4068) --- .travis.yml | 2 +- changelog.d/4068.misc | 1 + scripts-dev/check_auth.py | 36 ++-- scripts-dev/check_event_hash.py | 32 ++-- scripts-dev/check_signature.py | 36 ++-- scripts-dev/convert_server_keys.py | 40 +++-- scripts-dev/definitions.py | 54 +++--- scripts-dev/dump_macaroon.py | 13 +- scripts-dev/federation_client.py | 99 +++++------ scripts-dev/hash_history.py | 62 ++++--- scripts-dev/list_url_patterns.py | 16 +- scripts-dev/tail-synapse.py | 24 +-- scripts/hash_password | 5 +- scripts/move_remote_media_to_new_store.py | 36 ++-- scripts/register_new_matrix_user | 76 ++++---- scripts/synapse_port_db | 272 +++++++++++++---------------- synapse/app/homeserver.py | 2 +- synapse/config/__main__.py | 2 +- synapse/config/_base.py | 121 +++++++------ synapse/storage/_base.py | 4 +- synapse/storage/keys.py | 2 +- synapse/storage/pusher.py | 2 +- synapse/storage/signatures.py | 2 +- synapse/storage/transactions.py | 2 +- synapse/util/caches/stream_change_cache.py | 4 +- synctl | 80 ++++----- tox.ini | 4 +- 27 files changed, 511 
insertions(+), 518 deletions(-) create mode 100644 changelog.d/4068.misc (limited to 'synapse') diff --git a/.travis.yml b/.travis.yml index 2077f6af72..7ee1a278db 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,7 +14,7 @@ matrix: - python: 2.7 env: TOX_ENV=packaging - - python: 2.7 + - python: 3.6 env: TOX_ENV=pep8 - python: 2.7 diff --git a/changelog.d/4068.misc b/changelog.d/4068.misc new file mode 100644 index 0000000000..db6c4ade59 --- /dev/null +++ b/changelog.d/4068.misc @@ -0,0 +1 @@ +Make the Python scripts in the top-level scripts folders meet pep8 and pass flake8. diff --git a/scripts-dev/check_auth.py b/scripts-dev/check_auth.py index 4fa8792a5f..b3d11f49ec 100644 --- a/scripts-dev/check_auth.py +++ b/scripts-dev/check_auth.py @@ -1,21 +1,20 @@ -from synapse.events import FrozenEvent -from synapse.api.auth import Auth - -from mock import Mock +from __future__ import print_function import argparse import itertools import json import sys +from mock import Mock + +from synapse.api.auth import Auth +from synapse.events import FrozenEvent + def check_auth(auth, auth_chain, events): auth_chain.sort(key=lambda e: e.depth) - auth_map = { - e.event_id: e - for e in auth_chain - } + auth_map = {e.event_id: e for e in auth_chain} create_events = {} for e in auth_chain: @@ -25,31 +24,26 @@ def check_auth(auth, auth_chain, events): for e in itertools.chain(auth_chain, events): auth_events_list = [auth_map[i] for i, _ in e.auth_events] - auth_events = { - (e.type, e.state_key): e - for e in auth_events_list - } + auth_events = {(e.type, e.state_key): e for e in auth_events_list} auth_events[("m.room.create", "")] = create_events[e.room_id] try: auth.check(e, auth_events=auth_events) except Exception as ex: - print "Failed:", e.event_id, e.type, e.state_key - print "Auth_events:", auth_events - print ex - print json.dumps(e.get_dict(), sort_keys=True, indent=4) + print("Failed:", e.event_id, e.type, e.state_key) + print("Auth_events:", auth_events) + print(ex) + print(json.dumps(e.get_dict(), sort_keys=True, indent=4)) # raise - print "Success:", e.event_id, e.type, e.state_key + print("Success:", e.event_id, e.type, e.state_key) + if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( - 'json', - nargs='?', - type=argparse.FileType('r'), - default=sys.stdin, + 'json', nargs='?', type=argparse.FileType('r'), default=sys.stdin ) args = parser.parse_args() diff --git a/scripts-dev/check_event_hash.py b/scripts-dev/check_event_hash.py index 7ccae34d48..8535f99697 100644 --- a/scripts-dev/check_event_hash.py +++ b/scripts-dev/check_event_hash.py @@ -1,10 +1,15 @@ -from synapse.crypto.event_signing import * -from unpaddedbase64 import encode_base64 - import argparse import hashlib -import sys import json +import logging +import sys + +from unpaddedbase64 import encode_base64 + +from synapse.crypto.event_signing import ( + check_event_content_hash, + compute_event_reference_hash, +) class dictobj(dict): @@ -24,27 +29,26 @@ class dictobj(dict): def main(): parser = argparse.ArgumentParser() - parser.add_argument("input_json", nargs="?", type=argparse.FileType('r'), - default=sys.stdin) + parser.add_argument( + "input_json", nargs="?", type=argparse.FileType('r'), default=sys.stdin + ) args = parser.parse_args() logging.basicConfig() event_json = dictobj(json.load(args.input_json)) - algorithms = { - "sha256": hashlib.sha256, - } + algorithms = {"sha256": hashlib.sha256} for alg_name in event_json.hashes: if check_event_content_hash(event_json, algorithms[alg_name]): - 
print "PASS content hash %s" % (alg_name,) + print("PASS content hash %s" % (alg_name,)) else: - print "FAIL content hash %s" % (alg_name,) + print("FAIL content hash %s" % (alg_name,)) for algorithm in algorithms.values(): name, h_bytes = compute_event_reference_hash(event_json, algorithm) - print "Reference hash %s: %s" % (name, encode_base64(h_bytes)) + print("Reference hash %s: %s" % (name, encode_base64(h_bytes))) -if __name__=="__main__": - main() +if __name__ == "__main__": + main() diff --git a/scripts-dev/check_signature.py b/scripts-dev/check_signature.py index 079577908a..612f17ca7f 100644 --- a/scripts-dev/check_signature.py +++ b/scripts-dev/check_signature.py @@ -1,15 +1,15 @@ -from signedjson.sign import verify_signed_json -from signedjson.key import decode_verify_key_bytes, write_signing_keys -from unpaddedbase64 import decode_base64 - -import urllib2 +import argparse import json +import logging import sys +import urllib2 + import dns.resolver -import pprint -import argparse -import logging +from signedjson.key import decode_verify_key_bytes, write_signing_keys +from signedjson.sign import verify_signed_json +from unpaddedbase64 import decode_base64 + def get_targets(server_name): if ":" in server_name: @@ -23,6 +23,7 @@ def get_targets(server_name): except dns.resolver.NXDOMAIN: yield (server_name, 8448) + def get_server_keys(server_name, target, port): url = "https://%s:%i/_matrix/key/v1" % (target, port) keys = json.load(urllib2.urlopen(url)) @@ -33,12 +34,14 @@ def get_server_keys(server_name, target, port): verify_keys[key_id] = verify_key return verify_keys + def main(): parser = argparse.ArgumentParser() parser.add_argument("signature_name") - parser.add_argument("input_json", nargs="?", type=argparse.FileType('r'), - default=sys.stdin) + parser.add_argument( + "input_json", nargs="?", type=argparse.FileType('r'), default=sys.stdin + ) args = parser.parse_args() logging.basicConfig() @@ -48,24 +51,23 @@ def main(): for target, port in get_targets(server_name): try: keys = get_server_keys(server_name, target, port) - print "Using keys from https://%s:%s/_matrix/key/v1" % (target, port) + print("Using keys from https://%s:%s/_matrix/key/v1" % (target, port)) write_signing_keys(sys.stdout, keys.values()) break - except: + except Exception: logging.exception("Error talking to %s:%s", target, port) json_to_check = json.load(args.input_json) - print "Checking JSON:" + print("Checking JSON:") for key_id in json_to_check["signatures"][args.signature_name]: try: key = keys[key_id] verify_signed_json(json_to_check, args.signature_name, key) - print "PASS %s" % (key_id,) - except: + print("PASS %s" % (key_id,)) + except Exception: logging.exception("Check for key %s failed" % (key_id,)) - print "FAIL %s" % (key_id,) + print("FAIL %s" % (key_id,)) if __name__ == '__main__': main() - diff --git a/scripts-dev/convert_server_keys.py b/scripts-dev/convert_server_keys.py index 151551f22c..dde8596697 100644 --- a/scripts-dev/convert_server_keys.py +++ b/scripts-dev/convert_server_keys.py @@ -1,13 +1,21 @@ -import psycopg2 -import yaml -import sys +import hashlib import json +import sys import time -import hashlib -from unpaddedbase64 import encode_base64 + +import six + +import psycopg2 +import yaml +from canonicaljson import encode_canonical_json from signedjson.key import read_signing_keys from signedjson.sign import sign_json -from canonicaljson import encode_canonical_json +from unpaddedbase64 import encode_base64 + +if six.PY2: + db_type = six.moves.builtins.buffer +else: + 
db_type = memoryview def select_v1_keys(connection): @@ -39,7 +47,9 @@ def select_v2_json(connection): cursor.close() results = {} for server_name, key_id, key_json in rows: - results.setdefault(server_name, {})[key_id] = json.loads(str(key_json).decode("utf-8")) + results.setdefault(server_name, {})[key_id] = json.loads( + str(key_json).decode("utf-8") + ) return results @@ -47,10 +57,7 @@ def convert_v1_to_v2(server_name, valid_until, keys, certificate): return { "old_verify_keys": {}, "server_name": server_name, - "verify_keys": { - key_id: {"key": key} - for key_id, key in keys.items() - }, + "verify_keys": {key_id: {"key": key} for key_id, key in keys.items()}, "valid_until_ts": valid_until, "tls_fingerprints": [fingerprint(certificate)], } @@ -65,7 +72,7 @@ def rows_v2(server, json): valid_until = json["valid_until_ts"] key_json = encode_canonical_json(json) for key_id in json["verify_keys"]: - yield (server, key_id, "-", valid_until, valid_until, buffer(key_json)) + yield (server, key_id, "-", valid_until, valid_until, db_type(key_json)) def main(): @@ -87,7 +94,7 @@ def main(): result = {} for server in keys: - if not server in json: + if server not in json: v2_json = convert_v1_to_v2( server, valid_until, keys[server], certificates[server] ) @@ -96,10 +103,7 @@ def main(): yaml.safe_dump(result, sys.stdout, default_flow_style=False) - rows = list( - row for server, json in result.items() - for row in rows_v2(server, json) - ) + rows = list(row for server, json in result.items() for row in rows_v2(server, json)) cursor = connection.cursor() cursor.executemany( @@ -107,7 +111,7 @@ def main(): " server_name, key_id, from_server," " ts_added_ms, ts_valid_until_ms, key_json" ") VALUES (%s, %s, %s, %s, %s, %s)", - rows + rows, ) connection.commit() diff --git a/scripts-dev/definitions.py b/scripts-dev/definitions.py index 47dac7772d..1deb0fe2b7 100755 --- a/scripts-dev/definitions.py +++ b/scripts-dev/definitions.py @@ -1,8 +1,16 @@ #! 
/usr/bin/python +from __future__ import print_function + +import argparse import ast +import os +import re +import sys + import yaml + class DefinitionVisitor(ast.NodeVisitor): def __init__(self): super(DefinitionVisitor, self).__init__() @@ -42,15 +50,18 @@ def non_empty(defs): functions = {name: non_empty(f) for name, f in defs['def'].items()} classes = {name: non_empty(f) for name, f in defs['class'].items()} result = {} - if functions: result['def'] = functions - if classes: result['class'] = classes + if functions: + result['def'] = functions + if classes: + result['class'] = classes names = defs['names'] uses = [] for name in names.get('Load', ()): if name not in names.get('Param', ()) and name not in names.get('Store', ()): uses.append(name) uses.extend(defs['attrs']) - if uses: result['uses'] = uses + if uses: + result['uses'] = uses result['names'] = names result['attrs'] = defs['attrs'] return result @@ -95,7 +106,6 @@ def used_names(prefix, item, defs, names): if __name__ == '__main__': - import sys, os, argparse, re parser = argparse.ArgumentParser(description='Find definitions.') parser.add_argument( @@ -105,24 +115,28 @@ if __name__ == '__main__': "--ignore", action="append", metavar="REGEXP", help="Ignore a pattern" ) parser.add_argument( - "--pattern", action="append", metavar="REGEXP", - help="Search for a pattern" + "--pattern", action="append", metavar="REGEXP", help="Search for a pattern" ) parser.add_argument( - "directories", nargs='+', metavar="DIR", - help="Directories to search for definitions" + "directories", + nargs='+', + metavar="DIR", + help="Directories to search for definitions", ) parser.add_argument( - "--referrers", default=0, type=int, - help="Include referrers up to the given depth" + "--referrers", + default=0, + type=int, + help="Include referrers up to the given depth", ) parser.add_argument( - "--referred", default=0, type=int, - help="Include referred down to the given depth" + "--referred", + default=0, + type=int, + help="Include referred down to the given depth", ) parser.add_argument( - "--format", default="yaml", - help="Output format, one of 'yaml' or 'dot'" + "--format", default="yaml", help="Output format, one of 'yaml' or 'dot'" ) args = parser.parse_args() @@ -162,7 +176,7 @@ if __name__ == '__main__': for used_by in entry.get("used", ()): referrers.add(used_by) for name, definition in names.items(): - if not name in referrers: + if name not in referrers: continue if ignore and any(pattern.match(name) for pattern in ignore): continue @@ -176,7 +190,7 @@ if __name__ == '__main__': for uses in entry.get("uses", ()): referred.add(uses) for name, definition in names.items(): - if not name in referred: + if name not in referred: continue if ignore and any(pattern.match(name) for pattern in ignore): continue @@ -185,12 +199,12 @@ if __name__ == '__main__': if args.format == 'yaml': yaml.dump(result, sys.stdout, default_flow_style=False) elif args.format == 'dot': - print "digraph {" + print("digraph {") for name, entry in result.items(): - print name + print(name) for used_by in entry.get("used", ()): if used_by in result: - print used_by, "->", name - print "}" + print(used_by, "->", name) + print("}") else: raise ValueError("Unknown format %r" % (args.format)) diff --git a/scripts-dev/dump_macaroon.py b/scripts-dev/dump_macaroon.py index fcc5568835..22b30fa78e 100755 --- a/scripts-dev/dump_macaroon.py +++ b/scripts-dev/dump_macaroon.py @@ -1,8 +1,11 @@ #!/usr/bin/env python2 -import pymacaroons +from __future__ import print_function + import 
sys +import pymacaroons + if len(sys.argv) == 1: sys.stderr.write("usage: %s macaroon [key]\n" % (sys.argv[0],)) sys.exit(1) @@ -11,14 +14,14 @@ macaroon_string = sys.argv[1] key = sys.argv[2] if len(sys.argv) > 2 else None macaroon = pymacaroons.Macaroon.deserialize(macaroon_string) -print macaroon.inspect() +print(macaroon.inspect()) -print "" +print("") verifier = pymacaroons.Verifier() verifier.satisfy_general(lambda c: True) try: verifier.verify(macaroon, key) - print "Signature is correct" + print("Signature is correct") except Exception as e: - print str(e) + print(str(e)) diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py index d2acc7654d..2566ce7cef 100755 --- a/scripts-dev/federation_client.py +++ b/scripts-dev/federation_client.py @@ -18,21 +18,21 @@ from __future__ import print_function import argparse +import base64 +import json +import sys from urlparse import urlparse, urlunparse import nacl.signing -import json -import base64 import requests -import sys - -from requests.adapters import HTTPAdapter import srvlookup import yaml +from requests.adapters import HTTPAdapter # uncomment the following to enable debug logging of http requests -#from httplib import HTTPConnection -#HTTPConnection.debuglevel = 1 +# from httplib import HTTPConnection +# HTTPConnection.debuglevel = 1 + def encode_base64(input_bytes): """Encode bytes as a base64 string without any padding.""" @@ -58,15 +58,15 @@ def decode_base64(input_string): def encode_canonical_json(value): return json.dumps( - value, - # Encode code-points outside of ASCII as UTF-8 rather than \u escapes - ensure_ascii=False, - # Remove unecessary white space. - separators=(',',':'), - # Sort the keys of dictionaries. - sort_keys=True, - # Encode the resulting unicode as UTF-8 bytes. - ).encode("UTF-8") + value, + # Encode code-points outside of ASCII as UTF-8 rather than \u escapes + ensure_ascii=False, + # Remove unecessary white space. + separators=(',', ':'), + # Sort the keys of dictionaries. + sort_keys=True, + # Encode the resulting unicode as UTF-8 bytes. 
+ ).encode("UTF-8") def sign_json(json_object, signing_key, signing_name): @@ -88,6 +88,7 @@ def sign_json(json_object, signing_key, signing_name): NACL_ED25519 = "ed25519" + def decode_signing_key_base64(algorithm, version, key_base64): """Decode a base64 encoded signing key Args: @@ -143,14 +144,12 @@ def request_json(method, origin_name, origin_key, destination, path, content): authorization_headers = [] for key, sig in signed_json["signatures"][origin_name].items(): - header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % ( - origin_name, key, sig, - ) + header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (origin_name, key, sig) authorization_headers.append(bytes(header)) - print ("Authorization: %s" % header, file=sys.stderr) + print("Authorization: %s" % header, file=sys.stderr) dest = "matrix://%s%s" % (destination, path) - print ("Requesting %s" % dest, file=sys.stderr) + print("Requesting %s" % dest, file=sys.stderr) s = requests.Session() s.mount("matrix://", MatrixConnectionAdapter()) @@ -158,10 +157,7 @@ def request_json(method, origin_name, origin_key, destination, path, content): result = s.request( method=method, url=dest, - headers={ - "Host": destination, - "Authorization": authorization_headers[0] - }, + headers={"Host": destination, "Authorization": authorization_headers[0]}, verify=False, data=content, ) @@ -171,50 +167,50 @@ def request_json(method, origin_name, origin_key, destination, path, content): def main(): parser = argparse.ArgumentParser( - description= - "Signs and sends a federation request to a matrix homeserver", + description="Signs and sends a federation request to a matrix homeserver" ) parser.add_argument( - "-N", "--server-name", + "-N", + "--server-name", help="Name to give as the local homeserver. If unspecified, will be " - "read from the config file.", + "read from the config file.", ) parser.add_argument( - "-k", "--signing-key-path", + "-k", + "--signing-key-path", help="Path to the file containing the private ed25519 key to sign the " - "request with.", + "request with.", ) parser.add_argument( - "-c", "--config", + "-c", + "--config", default="homeserver.yaml", help="Path to server config file. Ignored if --server-name and " - "--signing-key-path are both given.", + "--signing-key-path are both given.", ) parser.add_argument( - "-d", "--destination", + "-d", + "--destination", default="matrix.org", help="name of the remote homeserver. We will do SRV lookups and " - "connect appropriately.", + "connect appropriately.", ) parser.add_argument( - "-X", "--method", + "-X", + "--method", help="HTTP method to use for the request. Defaults to GET if --data is" - "unspecified, POST if it is." + "unspecified, POST if it is.", ) - parser.add_argument( - "--body", - help="Data to send as the body of the HTTP request" - ) + parser.add_argument("--body", help="Data to send as the body of the HTTP request") parser.add_argument( - "path", - help="request path. We will add '/_matrix/federation/v1/' to this." + "path", help="request path. We will add '/_matrix/federation/v1/' to this." 
) args = parser.parse_args() @@ -227,13 +223,15 @@ def main(): result = request_json( args.method, - args.server_name, key, args.destination, + args.server_name, + key, + args.destination, "/_matrix/federation/v1/" + args.path, content=args.body, ) json.dump(result, sys.stdout) - print ("") + print("") def read_args_from_config(args): @@ -253,7 +251,7 @@ class MatrixConnectionAdapter(HTTPAdapter): return s, 8448 if ":" in s: - out = s.rsplit(":",1) + out = s.rsplit(":", 1) try: port = int(out[1]) except ValueError: @@ -263,7 +261,7 @@ class MatrixConnectionAdapter(HTTPAdapter): try: srv = srvlookup.lookup("matrix", "tcp", s)[0] return srv.host, srv.port - except: + except Exception: return s, 8448 def get_connection(self, url, proxies=None): @@ -272,10 +270,9 @@ class MatrixConnectionAdapter(HTTPAdapter): (host, port) = self.lookup(parsed.netloc) netloc = "%s:%d" % (host, port) print("Connecting to %s" % (netloc,), file=sys.stderr) - url = urlunparse(( - "https", netloc, parsed.path, parsed.params, parsed.query, - parsed.fragment, - )) + url = urlunparse( + ("https", netloc, parsed.path, parsed.params, parsed.query, parsed.fragment) + ) return super(MatrixConnectionAdapter, self).get_connection(url, proxies) diff --git a/scripts-dev/hash_history.py b/scripts-dev/hash_history.py index 616d6a10e7..514d80fa60 100644 --- a/scripts-dev/hash_history.py +++ b/scripts-dev/hash_history.py @@ -1,23 +1,31 @@ -from synapse.storage.pdu import PduStore -from synapse.storage.signatures import SignatureStore -from synapse.storage._base import SQLBaseStore -from synapse.federation.units import Pdu -from synapse.crypto.event_signing import ( - add_event_pdu_content_hash, compute_pdu_event_reference_hash -) -from synapse.api.events.utils import prune_pdu -from unpaddedbase64 import encode_base64, decode_base64 -from canonicaljson import encode_canonical_json +from __future__ import print_function + import sqlite3 import sys +from unpaddedbase64 import decode_base64, encode_base64 + +from synapse.crypto.event_signing import ( + add_event_pdu_content_hash, + compute_pdu_event_reference_hash, +) +from synapse.federation.units import Pdu +from synapse.storage._base import SQLBaseStore +from synapse.storage.pdu import PduStore +from synapse.storage.signatures import SignatureStore + + class Store(object): _get_pdu_tuples = PduStore.__dict__["_get_pdu_tuples"] _get_pdu_content_hashes_txn = SignatureStore.__dict__["_get_pdu_content_hashes_txn"] _get_prev_pdu_hashes_txn = SignatureStore.__dict__["_get_prev_pdu_hashes_txn"] - _get_pdu_origin_signatures_txn = SignatureStore.__dict__["_get_pdu_origin_signatures_txn"] + _get_pdu_origin_signatures_txn = SignatureStore.__dict__[ + "_get_pdu_origin_signatures_txn" + ] _store_pdu_content_hash_txn = SignatureStore.__dict__["_store_pdu_content_hash_txn"] - _store_pdu_reference_hash_txn = SignatureStore.__dict__["_store_pdu_reference_hash_txn"] + _store_pdu_reference_hash_txn = SignatureStore.__dict__[ + "_store_pdu_reference_hash_txn" + ] _store_prev_pdu_hash_txn = SignatureStore.__dict__["_store_prev_pdu_hash_txn"] _simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"] @@ -26,9 +34,7 @@ store = Store() def select_pdus(cursor): - cursor.execute( - "SELECT pdu_id, origin FROM pdus ORDER BY depth ASC" - ) + cursor.execute("SELECT pdu_id, origin FROM pdus ORDER BY depth ASC") ids = cursor.fetchall() @@ -41,23 +47,30 @@ def select_pdus(cursor): for pdu in pdus: try: if pdu.prev_pdus: - print "PROCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus + print("PROCESS", pdu.pdu_id, 
pdu.origin, pdu.prev_pdus) for pdu_id, origin, hashes in pdu.prev_pdus: ref_alg, ref_hsh = reference_hashes[(pdu_id, origin)] hashes[ref_alg] = encode_base64(ref_hsh) - store._store_prev_pdu_hash_txn(cursor, pdu.pdu_id, pdu.origin, pdu_id, origin, ref_alg, ref_hsh) - print "SUCCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus + store._store_prev_pdu_hash_txn( + cursor, pdu.pdu_id, pdu.origin, pdu_id, origin, ref_alg, ref_hsh + ) + print("SUCCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus) pdu = add_event_pdu_content_hash(pdu) ref_alg, ref_hsh = compute_pdu_event_reference_hash(pdu) reference_hashes[(pdu.pdu_id, pdu.origin)] = (ref_alg, ref_hsh) - store._store_pdu_reference_hash_txn(cursor, pdu.pdu_id, pdu.origin, ref_alg, ref_hsh) + store._store_pdu_reference_hash_txn( + cursor, pdu.pdu_id, pdu.origin, ref_alg, ref_hsh + ) for alg, hsh_base64 in pdu.hashes.items(): - print alg, hsh_base64 - store._store_pdu_content_hash_txn(cursor, pdu.pdu_id, pdu.origin, alg, decode_base64(hsh_base64)) + print(alg, hsh_base64) + store._store_pdu_content_hash_txn( + cursor, pdu.pdu_id, pdu.origin, alg, decode_base64(hsh_base64) + ) + + except Exception: + print("FAILED_", pdu.pdu_id, pdu.origin, pdu.prev_pdus) - except: - print "FAILED_", pdu.pdu_id, pdu.origin, pdu.prev_pdus def main(): conn = sqlite3.connect(sys.argv[1]) @@ -65,5 +78,6 @@ def main(): select_pdus(cursor) conn.commit() -if __name__=='__main__': + +if __name__ == '__main__': main() diff --git a/scripts-dev/list_url_patterns.py b/scripts-dev/list_url_patterns.py index 58d40c4ff4..da027be26e 100755 --- a/scripts-dev/list_url_patterns.py +++ b/scripts-dev/list_url_patterns.py @@ -1,18 +1,17 @@ #! /usr/bin/python -import ast import argparse +import ast import os import sys + import yaml PATTERNS_V1 = [] PATTERNS_V2 = [] -RESULT = { - "v1": PATTERNS_V1, - "v2": PATTERNS_V2, -} +RESULT = {"v1": PATTERNS_V1, "v2": PATTERNS_V2} + class CallVisitor(ast.NodeVisitor): def visit_Call(self, node): @@ -21,7 +20,6 @@ class CallVisitor(ast.NodeVisitor): else: return - if name == "client_path_patterns": PATTERNS_V1.append(node.args[0].s) elif name == "client_v2_patterns": @@ -42,8 +40,10 @@ def find_patterns_in_file(filepath): parser = argparse.ArgumentParser(description='Find url patterns.') parser.add_argument( - "directories", nargs='+', metavar="DIR", - help="Directories to search for definitions" + "directories", + nargs='+', + metavar="DIR", + help="Directories to search for definitions", ) args = parser.parse_args() diff --git a/scripts-dev/tail-synapse.py b/scripts-dev/tail-synapse.py index 18be711e92..1a36b94038 100644 --- a/scripts-dev/tail-synapse.py +++ b/scripts-dev/tail-synapse.py @@ -1,8 +1,9 @@ -import requests import collections +import json import sys import time -import json + +import requests Entry = collections.namedtuple("Entry", "name position rows") @@ -30,11 +31,11 @@ def parse_response(content): def replicate(server, streams): - return parse_response(requests.get( - server + "/_synapse/replication", - verify=False, - params=streams - ).content) + return parse_response( + requests.get( + server + "/_synapse/replication", verify=False, params=streams + ).content + ) def main(): @@ -45,7 +46,7 @@ def main(): try: streams = { row.name: row.position - for row in replicate(server, {"streams":"-1"})["streams"].rows + for row in replicate(server, {"streams": "-1"})["streams"].rows } except requests.exceptions.ConnectionError as e: time.sleep(0.1) @@ -53,8 +54,8 @@ def main(): while True: try: results = replicate(server, streams) - except: - 
sys.stdout.write("connection_lost("+ repr(streams) + ")\n") + except Exception: + sys.stdout.write("connection_lost(" + repr(streams) + ")\n") break for update in results.values(): for row in update.rows: @@ -62,6 +63,5 @@ def main(): streams[update.name] = update.position - -if __name__=='__main__': +if __name__ == '__main__': main() diff --git a/scripts/hash_password b/scripts/hash_password index 215ab25cfe..a62bb5aa83 100755 --- a/scripts/hash_password +++ b/scripts/hash_password @@ -1,12 +1,10 @@ #!/usr/bin/env python import argparse - +import getpass import sys import bcrypt -import getpass - import yaml bcrypt_rounds=12 @@ -52,4 +50,3 @@ if __name__ == "__main__": password = prompt_for_pass() print bcrypt.hashpw(password + password_pepper, bcrypt.gensalt(bcrypt_rounds)) - diff --git a/scripts/move_remote_media_to_new_store.py b/scripts/move_remote_media_to_new_store.py index 7914ead889..e630936f78 100755 --- a/scripts/move_remote_media_to_new_store.py +++ b/scripts/move_remote_media_to_new_store.py @@ -36,12 +36,9 @@ from __future__ import print_function import argparse import logging - -import sys - import os - import shutil +import sys from synapse.rest.media.v1.filepath import MediaFilePaths @@ -77,24 +74,23 @@ def move_media(origin_server, file_id, src_paths, dest_paths): if not os.path.exists(original_file): logger.warn( "Original for %s/%s (%s) does not exist", - origin_server, file_id, original_file, + origin_server, + file_id, + original_file, ) else: mkdir_and_move( - original_file, - dest_paths.remote_media_filepath(origin_server, file_id), + original_file, dest_paths.remote_media_filepath(origin_server, file_id) ) # now look for thumbnails - original_thumb_dir = src_paths.remote_media_thumbnail_dir( - origin_server, file_id, - ) + original_thumb_dir = src_paths.remote_media_thumbnail_dir(origin_server, file_id) if not os.path.exists(original_thumb_dir): return mkdir_and_move( original_thumb_dir, - dest_paths.remote_media_thumbnail_dir(origin_server, file_id) + dest_paths.remote_media_thumbnail_dir(origin_server, file_id), ) @@ -109,24 +105,16 @@ def mkdir_and_move(original_file, dest_file): if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class = argparse.RawDescriptionHelpFormatter, - ) - parser.add_argument( - "-v", action='store_true', help='enable debug logging') - parser.add_argument( - "src_repo", - help="Path to source content repo", - ) - parser.add_argument( - "dest_repo", - help="Path to source content repo", + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter ) + parser.add_argument("-v", action='store_true', help='enable debug logging') + parser.add_argument("src_repo", help="Path to source content repo") + parser.add_argument("dest_repo", help="Path to source content repo") args = parser.parse_args() logging_config = { "level": logging.DEBUG if args.v else logging.INFO, - "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s" + "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s", } logging.basicConfig(**logging_config) diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user index 91bdb3a25b..89143c5d59 100755 --- a/scripts/register_new_matrix_user +++ b/scripts/register_new_matrix_user @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
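
For reference, the peppered bcrypt pattern in scripts/hash_password above reduces to the following sketch (bcrypt library assumed; the pepper value here is hypothetical, and on Python 3 both password and pepper must be bytes):

import bcrypt

password = b"s3cret"
pepper = b"site-wide-pepper"  # hypothetical; the script reads this from its config
hashed = bcrypt.hashpw(password + pepper, bcrypt.gensalt(12))  # 12 rounds, as in the script
assert bcrypt.checkpw(password + pepper, hashed)
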
+from __future__ import print_function import argparse import getpass @@ -22,19 +23,23 @@ import hmac import json import sys import urllib2 + +from six import input + import yaml def request_registration(user, password, server_location, shared_secret, admin=False): req = urllib2.Request( "%s/_matrix/client/r0/admin/register" % (server_location,), - headers={'Content-Type': 'application/json'} + headers={'Content-Type': 'application/json'}, ) try: if sys.version_info[:3] >= (2, 7, 9): # As of version 2.7.9, urllib2 now checks SSL certs import ssl + f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23)) else: f = urllib2.urlopen(req) @@ -42,18 +47,15 @@ def request_registration(user, password, server_location, shared_secret, admin=F f.close() nonce = json.loads(body)["nonce"] except urllib2.HTTPError as e: - print "ERROR! Received %d %s" % (e.code, e.reason,) + print("ERROR! Received %d %s" % (e.code, e.reason)) if 400 <= e.code < 500: if e.info().type == "application/json": resp = json.load(e) if "error" in resp: - print resp["error"] + print(resp["error"]) sys.exit(1) - mac = hmac.new( - key=shared_secret, - digestmod=hashlib.sha1, - ) + mac = hmac.new(key=shared_secret, digestmod=hashlib.sha1) mac.update(nonce) mac.update("\x00") @@ -75,30 +77,31 @@ def request_registration(user, password, server_location, shared_secret, admin=F server_location = server_location.rstrip("/") - print "Sending registration request..." + print("Sending registration request...") req = urllib2.Request( "%s/_matrix/client/r0/admin/register" % (server_location,), data=json.dumps(data), - headers={'Content-Type': 'application/json'} + headers={'Content-Type': 'application/json'}, ) try: if sys.version_info[:3] >= (2, 7, 9): # As of version 2.7.9, urllib2 now checks SSL certs import ssl + f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23)) else: f = urllib2.urlopen(req) f.read() f.close() - print "Success." + print("Success.") except urllib2.HTTPError as e: - print "ERROR! Received %d %s" % (e.code, e.reason,) + print("ERROR! Received %d %s" % (e.code, e.reason)) if 400 <= e.code < 500: if e.info().type == "application/json": resp = json.load(e) if "error" in resp: - print resp["error"] + print(resp["error"]) sys.exit(1) @@ -106,35 +109,35 @@ def register_new_user(user, password, server_location, shared_secret, admin): if not user: try: default_user = getpass.getuser() - except: + except Exception: default_user = None if default_user: - user = raw_input("New user localpart [%s]: " % (default_user,)) + user = input("New user localpart [%s]: " % (default_user,)) if not user: user = default_user else: - user = raw_input("New user localpart: ") + user = input("New user localpart: ") if not user: - print "Invalid user name" + print("Invalid user name") sys.exit(1) if not password: password = getpass.getpass("Password: ") if not password: - print "Password cannot be blank." 
+ print("Password cannot be blank.") sys.exit(1) confirm_password = getpass.getpass("Confirm password: ") if password != confirm_password: - print "Passwords do not match" + print("Passwords do not match") sys.exit(1) if admin is None: - admin = raw_input("Make admin [no]: ") + admin = input("Make admin [no]: ") if admin in ("y", "yes", "true"): admin = True else: @@ -146,42 +149,51 @@ def register_new_user(user, password, server_location, shared_secret, admin): if __name__ == "__main__": parser = argparse.ArgumentParser( description="Used to register new users with a given home server when" - " registration has been disabled. The home server must be" - " configured with the 'registration_shared_secret' option" - " set.", + " registration has been disabled. The home server must be" + " configured with the 'registration_shared_secret' option" + " set." ) parser.add_argument( - "-u", "--user", + "-u", + "--user", default=None, help="Local part of the new user. Will prompt if omitted.", ) parser.add_argument( - "-p", "--password", + "-p", + "--password", default=None, help="New password for user. Will prompt if omitted.", ) admin_group = parser.add_mutually_exclusive_group() admin_group.add_argument( - "-a", "--admin", + "-a", + "--admin", action="store_true", - help="Register new user as an admin. Will prompt if --no-admin is not set either.", + help=( + "Register new user as an admin. " + "Will prompt if --no-admin is not set either." + ), ) admin_group.add_argument( "--no-admin", action="store_true", - help="Register new user as a regular user. Will prompt if --admin is not set either.", + help=( + "Register new user as a regular user. " + "Will prompt if --admin is not set either." + ), ) group = parser.add_mutually_exclusive_group(required=True) group.add_argument( - "-c", "--config", + "-c", + "--config", type=argparse.FileType('r'), help="Path to server config file. Used to read in shared secret.", ) group.add_argument( - "-k", "--shared-secret", - help="Shared secret as defined in server config file.", + "-k", "--shared-secret", help="Shared secret as defined in server config file." ) parser.add_argument( @@ -189,7 +201,7 @@ if __name__ == "__main__": default="https://localhost:8448", nargs='?', help="URL to use to talk to the home server. Defaults to " - " 'https://localhost:8448'.", + " 'https://localhost:8448'.", ) args = parser.parse_args() @@ -198,7 +210,7 @@ if __name__ == "__main__": config = yaml.safe_load(args.config) secret = config.get("registration_shared_secret", None) if not secret: - print "No 'registration_shared_secret' defined in config." + print("No 'registration_shared_secret' defined in config.") sys.exit(1) else: secret = args.shared_secret diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index b9b828c154..2f6e69e552 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -15,23 +15,23 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from twisted.internet import defer, reactor -from twisted.enterprise import adbapi - -from synapse.storage._base import LoggingTransaction, SQLBaseStore -from synapse.storage.engines import create_engine -from synapse.storage.prepare_database import prepare_database - import argparse import curses import logging import sys import time import traceback -import yaml from six import string_types +import yaml + +from twisted.enterprise import adbapi +from twisted.internet import defer, reactor + +from synapse.storage._base import LoggingTransaction, SQLBaseStore +from synapse.storage.engines import create_engine +from synapse.storage.prepare_database import prepare_database logger = logging.getLogger("synapse_port_db") @@ -105,6 +105,7 @@ class Store(object): *All* database interactions should go through this object. """ + def __init__(self, db_pool, engine): self.db_pool = db_pool self.database_engine = engine @@ -135,7 +136,8 @@ class Store(object): txn = conn.cursor() return func( LoggingTransaction(txn, desc, self.database_engine, [], []), - *args, **kwargs + *args, + **kwargs ) except self.database_engine.module.DatabaseError as e: if self.database_engine.is_deadlock(e): @@ -158,22 +160,20 @@ class Store(object): def r(txn): txn.execute(sql, args) return txn.fetchall() + return self.runInteraction("execute_sql", r) def insert_many_txn(self, txn, table, headers, rows): sql = "INSERT INTO %s (%s) VALUES (%s)" % ( table, ", ".join(k for k in headers), - ", ".join("%s" for _ in headers) + ", ".join("%s" for _ in headers), ) try: txn.executemany(sql, rows) - except: - logger.exception( - "Failed to insert: %s", - table, - ) + except Exception: + logger.exception("Failed to insert: %s", table) raise @@ -206,7 +206,7 @@ class Porter(object): "table_name": table, "forward_rowid": 1, "backward_rowid": 0, - } + }, ) forward_chunk = 1 @@ -221,10 +221,10 @@ class Porter(object): table, forward_chunk, backward_chunk ) else: + def delete_all(txn): txn.execute( - "DELETE FROM port_from_sqlite3 WHERE table_name = %s", - (table,) + "DELETE FROM port_from_sqlite3 WHERE table_name = %s", (table,) ) txn.execute("TRUNCATE %s CASCADE" % (table,)) @@ -232,11 +232,7 @@ class Porter(object): yield self.postgres_store._simple_insert( table="port_from_sqlite3", - values={ - "table_name": table, - "forward_rowid": 1, - "backward_rowid": 0, - } + values={"table_name": table, "forward_rowid": 1, "backward_rowid": 0}, ) forward_chunk = 1 @@ -251,12 +247,16 @@ class Porter(object): ) @defer.inlineCallbacks - def handle_table(self, table, postgres_size, table_size, forward_chunk, - backward_chunk): + def handle_table( + self, table, postgres_size, table_size, forward_chunk, backward_chunk + ): logger.info( "Table %s: %i/%i (rows %i-%i) already ported", - table, postgres_size, table_size, - backward_chunk+1, forward_chunk-1, + table, + postgres_size, + table_size, + backward_chunk + 1, + forward_chunk - 1, ) if not table_size: @@ -271,7 +271,9 @@ class Porter(object): return if table in ( - "user_directory", "user_directory_search", "users_who_share_rooms", + "user_directory", + "user_directory_search", + "users_who_share_rooms", "users_in_pubic_room", ): # We don't port these tables, as they're a faff and we can regenreate @@ -283,37 +285,35 @@ class Porter(object): # We need to make sure there is a single row, `(X, null), as that is # what synapse expects to be there. 
yield self.postgres_store._simple_insert( - table=table, - values={"stream_id": None}, + table=table, values={"stream_id": None} ) self.progress.update(table, table_size) # Mark table as done return forward_select = ( - "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?" - % (table,) + "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?" % (table,) ) backward_select = ( - "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?" - % (table,) + "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?" % (table,) ) do_forward = [True] do_backward = [True] while True: + def r(txn): forward_rows = [] backward_rows = [] if do_forward[0]: - txn.execute(forward_select, (forward_chunk, self.batch_size,)) + txn.execute(forward_select, (forward_chunk, self.batch_size)) forward_rows = txn.fetchall() if not forward_rows: do_forward[0] = False if do_backward[0]: - txn.execute(backward_select, (backward_chunk, self.batch_size,)) + txn.execute(backward_select, (backward_chunk, self.batch_size)) backward_rows = txn.fetchall() if not backward_rows: do_backward[0] = False @@ -325,9 +325,7 @@ class Porter(object): return headers, forward_rows, backward_rows - headers, frows, brows = yield self.sqlite_store.runInteraction( - "select", r - ) + headers, frows, brows = yield self.sqlite_store.runInteraction("select", r) if frows or brows: if frows: @@ -339,9 +337,7 @@ class Porter(object): rows = self._convert_rows(table, headers, rows) def insert(txn): - self.postgres_store.insert_many_txn( - txn, table, headers[1:], rows - ) + self.postgres_store.insert_many_txn(txn, table, headers[1:], rows) self.postgres_store._simple_update_one_txn( txn, @@ -362,8 +358,9 @@ class Porter(object): return @defer.inlineCallbacks - def handle_search_table(self, postgres_size, table_size, forward_chunk, - backward_chunk): + def handle_search_table( + self, postgres_size, table_size, forward_chunk, backward_chunk + ): select = ( "SELECT es.rowid, es.*, e.origin_server_ts, e.stream_ordering" " FROM event_search as es" @@ -373,8 +370,9 @@ class Porter(object): ) while True: + def r(txn): - txn.execute(select, (forward_chunk, self.batch_size,)) + txn.execute(select, (forward_chunk, self.batch_size)) rows = txn.fetchall() headers = [column[0] for column in txn.description] @@ -402,18 +400,21 @@ class Porter(object): else: rows_dict.append(d) - txn.executemany(sql, [ - ( - row["event_id"], - row["room_id"], - row["key"], - row["sender"], - row["value"], - row["origin_server_ts"], - row["stream_ordering"], - ) - for row in rows_dict - ]) + txn.executemany( + sql, + [ + ( + row["event_id"], + row["room_id"], + row["key"], + row["sender"], + row["value"], + row["origin_server_ts"], + row["stream_ordering"], + ) + for row in rows_dict + ], + ) self.postgres_store._simple_update_one_txn( txn, @@ -437,7 +438,8 @@ class Porter(object): def setup_db(self, db_config, database_engine): db_conn = database_engine.module.connect( **{ - k: v for k, v in db_config.get("args", {}).items() + k: v + for k, v in db_config.get("args", {}).items() if not k.startswith("cp_") } ) @@ -450,13 +452,11 @@ class Porter(object): def run(self): try: sqlite_db_pool = adbapi.ConnectionPool( - self.sqlite_config["name"], - **self.sqlite_config["args"] + self.sqlite_config["name"], **self.sqlite_config["args"] ) postgres_db_pool = adbapi.ConnectionPool( - self.postgres_config["name"], - **self.postgres_config["args"] + self.postgres_config["name"], **self.postgres_config["args"] ) sqlite_engine = create_engine(sqlite_config) 
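
The chunk-walking logic in handle_table above is the heart of the port: each table is copied in windows of at most batch_size rows, keyed on rowid, with the forward_chunk/backward_chunk checkpoints persisted in port_from_sqlite3 so an interrupted run can resume. A minimal, self-contained sketch of the same windowed-copy idea over two sqlite3 connections (the real script also walks backwards from its checkpoint, converts boolean columns, and writes into PostgreSQL; the connection and table names here are placeholders, not part of the patch):

    import sqlite3

    def copy_table_in_chunks(src, dst, table, batch_size=1000):
        # Start just above rowid 0; the real script loads this checkpoint
        # from (and saves it back to) its port_from_sqlite3 table.
        forward_chunk = 1
        while True:
            rows = src.execute(
                "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
                % (table,),
                (forward_chunk, batch_size),
            ).fetchall()
            if not rows:
                break
            # Advance the checkpoint past the last rowid we copied.
            forward_chunk = rows[-1][0] + 1
            payload = [row[1:] for row in rows]  # drop the rowid column itself
            placeholders = ", ".join("?" for _ in payload[0])
            dst.executemany(
                "INSERT INTO %s VALUES (%s)" % (table, placeholders), payload
            )
            dst.commit()

Persisting forward_chunk after each committed batch, as the port script does, is what makes the copy safely resumable.
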
@@ -465,9 +465,7 @@ class Porter(object): self.sqlite_store = Store(sqlite_db_pool, sqlite_engine) self.postgres_store = Store(postgres_db_pool, postgres_engine) - yield self.postgres_store.execute( - postgres_engine.check_database - ) + yield self.postgres_store.execute(postgres_engine.check_database) # Step 1. Set up databases. self.progress.set_state("Preparing SQLite3") @@ -477,6 +475,7 @@ class Porter(object): self.setup_db(postgres_config, postgres_engine) self.progress.set_state("Creating port tables") + def create_port_table(txn): txn.execute( "CREATE TABLE IF NOT EXISTS port_from_sqlite3 (" @@ -501,9 +500,7 @@ class Porter(object): ) try: - yield self.postgres_store.runInteraction( - "alter_table", alter_table - ) + yield self.postgres_store.runInteraction("alter_table", alter_table) except Exception as e: pass @@ -514,11 +511,7 @@ class Porter(object): # Step 2. Get tables. self.progress.set_state("Fetching tables") sqlite_tables = yield self.sqlite_store._simple_select_onecol( - table="sqlite_master", - keyvalues={ - "type": "table", - }, - retcol="name", + table="sqlite_master", keyvalues={"type": "table"}, retcol="name" ) postgres_tables = yield self.postgres_store._simple_select_onecol( @@ -545,18 +538,14 @@ class Porter(object): # Step 4. Do the copying. self.progress.set_state("Copying to postgres") yield defer.gatherResults( - [ - self.handle_table(*res) - for res in setup_res - ], - consumeErrors=True, + [self.handle_table(*res) for res in setup_res], consumeErrors=True ) # Step 5. Do final post-processing yield self._setup_state_group_id_seq() self.progress.done() - except: + except Exception: global end_error_exec_info end_error_exec_info = sys.exc_info() logger.exception("") @@ -566,9 +555,7 @@ class Porter(object): def _convert_rows(self, table, headers, rows): bool_col_names = BOOLEAN_COLUMNS.get(table, []) - bool_cols = [ - i for i, h in enumerate(headers) if h in bool_col_names - ] + bool_cols = [i for i, h in enumerate(headers) if h in bool_col_names] class BadValueException(Exception): pass @@ -577,18 +564,21 @@ class Porter(object): if j in bool_cols: return bool(col) elif isinstance(col, string_types) and "\0" in col: - logger.warn("DROPPING ROW: NUL value in table %s col %s: %r", table, headers[j], col) - raise BadValueException(); + logger.warn( + "DROPPING ROW: NUL value in table %s col %s: %r", + table, + headers[j], + col, + ) + raise BadValueException() return col outrows = [] for i, row in enumerate(rows): try: - outrows.append(tuple( - conv(j, col) - for j, col in enumerate(row) - if j > 0 - )) + outrows.append( + tuple(conv(j, col) for j, col in enumerate(row) if j > 0) + ) except BadValueException: pass @@ -616,9 +606,7 @@ class Porter(object): return headers, [r for r in rows if r[ts_ind] < yesterday] - headers, rows = yield self.sqlite_store.runInteraction( - "select", r, - ) + headers, rows = yield self.sqlite_store.runInteraction("select", r) rows = self._convert_rows("sent_transactions", headers, rows) @@ -639,7 +627,7 @@ class Porter(object): txn.execute( "SELECT rowid FROM sent_transactions WHERE ts >= ?" 
" ORDER BY rowid ASC LIMIT 1", - (yesterday,) + (yesterday,), ) rows = txn.fetchall() @@ -657,21 +645,17 @@ class Porter(object): "table_name": "sent_transactions", "forward_rowid": next_chunk, "backward_rowid": 0, - } + }, ) def get_sent_table_size(txn): txn.execute( - "SELECT count(*) FROM sent_transactions" - " WHERE ts >= ?", - (yesterday,) + "SELECT count(*) FROM sent_transactions" " WHERE ts >= ?", (yesterday,) ) size, = txn.fetchone() return int(size) - remaining_count = yield self.sqlite_store.execute( - get_sent_table_size - ) + remaining_count = yield self.sqlite_store.execute(get_sent_table_size) total_count = remaining_count + inserted_rows @@ -680,13 +664,11 @@ class Porter(object): @defer.inlineCallbacks def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk): frows = yield self.sqlite_store.execute_sql( - "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,), - forward_chunk, + "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,), forward_chunk ) brows = yield self.sqlite_store.execute_sql( - "SELECT count(*) FROM %s WHERE rowid <= ?" % (table,), - backward_chunk, + "SELECT count(*) FROM %s WHERE rowid <= ?" % (table,), backward_chunk ) defer.returnValue(frows[0][0] + brows[0][0]) @@ -694,7 +676,7 @@ class Porter(object): @defer.inlineCallbacks def _get_already_ported_count(self, table): rows = yield self.postgres_store.execute_sql( - "SELECT count(*) FROM %s" % (table,), + "SELECT count(*) FROM %s" % (table,) ) defer.returnValue(rows[0][0]) @@ -717,22 +699,21 @@ class Porter(object): def _setup_state_group_id_seq(self): def r(txn): txn.execute("SELECT MAX(id) FROM state_groups") - next_id = txn.fetchone()[0]+1 - txn.execute( - "ALTER SEQUENCE state_group_id_seq RESTART WITH %s", - (next_id,), - ) + next_id = txn.fetchone()[0] + 1 + txn.execute("ALTER SEQUENCE state_group_id_seq RESTART WITH %s", (next_id,)) + return self.postgres_store.runInteraction("setup_state_group_id_seq", r) ############################################## -###### The following is simply UI stuff ###### +# The following is simply UI stuff ############################################## class Progress(object): """Used to report progress of the port """ + def __init__(self): self.tables = {} @@ -758,6 +739,7 @@ class Progress(object): class CursesProgress(Progress): """Reports progress to a curses window """ + def __init__(self, stdscr): self.stdscr = stdscr @@ -801,7 +783,7 @@ class CursesProgress(Progress): duration = int(now) - int(self.start_time) minutes, seconds = divmod(duration, 60) - duration_str = '%02dm %02ds' % (minutes, seconds,) + duration_str = '%02dm %02ds' % (minutes, seconds) if self.finished: status = "Time spent: %s (Done!)" % (duration_str,) @@ -814,16 +796,12 @@ class CursesProgress(Progress): est_remaining_str = '%02dm %02ds remaining' % divmod(est_remaining, 60) else: est_remaining_str = "Unknown" - status = ( - "Time spent: %s (est. remaining: %s)" - % (duration_str, est_remaining_str,) + status = "Time spent: %s (est. 
remaining: %s)" % ( + duration_str, + est_remaining_str, ) - self.stdscr.addstr( - 0, 0, - status, - curses.A_BOLD, - ) + self.stdscr.addstr(0, 0, status, curses.A_BOLD) max_len = max([len(t) for t in self.tables.keys()]) @@ -831,9 +809,7 @@ class CursesProgress(Progress): middle_space = 1 items = self.tables.items() - items.sort( - key=lambda i: (i[1]["perc"], i[0]), - ) + items.sort(key=lambda i: (i[1]["perc"], i[0])) for i, (table, data) in enumerate(items): if i + 2 >= rows: @@ -844,9 +820,7 @@ class CursesProgress(Progress): color = curses.color_pair(2) if perc == 100 else curses.color_pair(1) self.stdscr.addstr( - i + 2, left_margin + max_len - len(table), - table, - curses.A_BOLD | color, + i + 2, left_margin + max_len - len(table), table, curses.A_BOLD | color ) size = 20 @@ -857,15 +831,13 @@ class CursesProgress(Progress): ) self.stdscr.addstr( - i + 2, left_margin + max_len + middle_space, + i + 2, + left_margin + max_len + middle_space, "%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]), ) if self.finished: - self.stdscr.addstr( - rows - 1, 0, - "Press any key to exit...", - ) + self.stdscr.addstr(rows - 1, 0, "Press any key to exit...") self.stdscr.refresh() self.last_update = time.time() @@ -877,29 +849,25 @@ class CursesProgress(Progress): def set_state(self, state): self.stdscr.clear() - self.stdscr.addstr( - 0, 0, - state + "...", - curses.A_BOLD, - ) + self.stdscr.addstr(0, 0, state + "...", curses.A_BOLD) self.stdscr.refresh() class TerminalProgress(Progress): """Just prints progress to the terminal """ + def update(self, table, num_done): super(TerminalProgress, self).update(table, num_done) data = self.tables[table] - print "%s: %d%% (%d/%d)" % ( - table, data["perc"], - data["num_done"], data["total"], + print( + "%s: %d%% (%d/%d)" % (table, data["perc"], data["num_done"], data["total"]) ) def set_state(self, state): - print state + "..." + print(state + "...") ############################################## @@ -909,34 +877,38 @@ class TerminalProgress(Progress): if __name__ == "__main__": parser = argparse.ArgumentParser( description="A script to port an existing synapse SQLite database to" - " a new PostgreSQL database." + " a new PostgreSQL database." ) parser.add_argument("-v", action='store_true') parser.add_argument( - "--sqlite-database", required=True, + "--sqlite-database", + required=True, help="The snapshot of the SQLite database file. 
This must not be" - " currently used by a running synapse server" + " currently used by a running synapse server", ) parser.add_argument( - "--postgres-config", type=argparse.FileType('r'), required=True, - help="The database config file for the PostgreSQL database" + "--postgres-config", + type=argparse.FileType('r'), + required=True, + help="The database config file for the PostgreSQL database", ) parser.add_argument( - "--curses", action='store_true', - help="display a curses based progress UI" + "--curses", action='store_true', help="display a curses based progress UI" ) parser.add_argument( - "--batch-size", type=int, default=1000, + "--batch-size", + type=int, + default=1000, help="The number of rows to select from the SQLite table each" - " iteration [default=1000]", + " iteration [default=1000]", ) args = parser.parse_args() logging_config = { "level": logging.DEBUG if args.v else logging.INFO, - "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s" + "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s", } if args.curses: diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index e3f0d99a3f..84f8fe2d9e 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -568,7 +568,7 @@ def run(hs): clock.call_later(5 * 60, start_phone_stats_home) if hs.config.daemonize and hs.config.print_pidfile: - print (hs.config.pid_file) + print(hs.config.pid_file) _base.start_reactor( "synapse-homeserver", diff --git a/synapse/config/__main__.py b/synapse/config/__main__.py index 8fccf573ee..79fe9c3dac 100644 --- a/synapse/config/__main__.py +++ b/synapse/config/__main__.py @@ -28,7 +28,7 @@ if __name__ == "__main__": sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) - print (getattr(config, key)) + print(getattr(config, key)) sys.exit(0) else: sys.stderr.write("Unknown command %r\n" % (action,)) diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 3d2e90dd5b..14dae65ea0 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -106,10 +106,7 @@ class Config(object): @classmethod def check_file(cls, file_path, config_name): if file_path is None: - raise ConfigError( - "Missing config for %s." - % (config_name,) - ) + raise ConfigError("Missing config for %s." 
% (config_name,)) try: os.stat(file_path) except OSError as e: @@ -128,9 +125,7 @@ class Config(object): if e.errno != errno.EEXIST: raise if not os.path.isdir(dir_path): - raise ConfigError( - "%s is not a directory" % (dir_path,) - ) + raise ConfigError("%s is not a directory" % (dir_path,)) return dir_path @classmethod @@ -156,21 +151,20 @@ class Config(object): return results def generate_config( - self, - config_dir_path, - server_name, - is_generating_file, - report_stats=None, + self, config_dir_path, server_name, is_generating_file, report_stats=None ): default_config = "# vim:ft=yaml\n" - default_config += "\n\n".join(dedent(conf) for conf in self.invoke_all( - "default_config", - config_dir_path=config_dir_path, - server_name=server_name, - is_generating_file=is_generating_file, - report_stats=report_stats, - )) + default_config += "\n\n".join( + dedent(conf) + for conf in self.invoke_all( + "default_config", + config_dir_path=config_dir_path, + server_name=server_name, + is_generating_file=is_generating_file, + report_stats=report_stats, + ) + ) config = yaml.load(default_config) @@ -178,23 +172,22 @@ class Config(object): @classmethod def load_config(cls, description, argv): - config_parser = argparse.ArgumentParser( - description=description, - ) + config_parser = argparse.ArgumentParser(description=description) config_parser.add_argument( - "-c", "--config-path", + "-c", + "--config-path", action="append", metavar="CONFIG_FILE", help="Specify config file. Can be given multiple times and" - " may specify directories containing *.yaml files." + " may specify directories containing *.yaml files.", ) config_parser.add_argument( "--keys-directory", metavar="DIRECTORY", help="Where files such as certs and signing keys are stored when" - " their location is given explicitly in the config." - " Defaults to the directory containing the last config file", + " their location is given explicitly in the config." + " Defaults to the directory containing the last config file", ) config_args = config_parser.parse_args(argv) @@ -203,9 +196,7 @@ class Config(object): obj = cls() obj.read_config_files( - config_files, - keys_directory=config_args.keys_directory, - generate_keys=False, + config_files, keys_directory=config_args.keys_directory, generate_keys=False ) return obj @@ -213,38 +204,38 @@ class Config(object): def load_or_generate_config(cls, description, argv): config_parser = argparse.ArgumentParser(add_help=False) config_parser.add_argument( - "-c", "--config-path", + "-c", + "--config-path", action="append", metavar="CONFIG_FILE", help="Specify config file. Can be given multiple times and" - " may specify directories containing *.yaml files." + " may specify directories containing *.yaml files.", ) config_parser.add_argument( "--generate-config", action="store_true", - help="Generate a config file for the server name" + help="Generate a config file for the server name", ) config_parser.add_argument( "--report-stats", action="store", help="Whether the generated config reports anonymized usage statistics", - choices=["yes", "no"] + choices=["yes", "no"], ) config_parser.add_argument( "--generate-keys", action="store_true", - help="Generate any missing key files then exit" + help="Generate any missing key files then exit", ) config_parser.add_argument( "--keys-directory", metavar="DIRECTORY", help="Used with 'generate-*' options to specify where files such as" - " certs and signing keys should be stored in, unless explicitly" - " specified in the config." 
+ " certs and signing keys should be stored in, unless explicitly" + " specified in the config.", ) config_parser.add_argument( - "-H", "--server-name", - help="The server name to generate a config file for" + "-H", "--server-name", help="The server name to generate a config file for" ) config_args, remaining_args = config_parser.parse_known_args(argv) @@ -257,8 +248,8 @@ class Config(object): if config_args.generate_config: if config_args.report_stats is None: config_parser.error( - "Please specify either --report-stats=yes or --report-stats=no\n\n" + - MISSING_REPORT_STATS_SPIEL + "Please specify either --report-stats=yes or --report-stats=no\n\n" + + MISSING_REPORT_STATS_SPIEL ) if not config_files: config_parser.error( @@ -287,26 +278,32 @@ class Config(object): config_dir_path=config_dir_path, server_name=server_name, report_stats=(config_args.report_stats == "yes"), - is_generating_file=True + is_generating_file=True, ) obj.invoke_all("generate_files", config) config_file.write(config_str) - print(( - "A config file has been generated in %r for server name" - " %r with corresponding SSL keys and self-signed" - " certificates. Please review this file and customise it" - " to your needs." - ) % (config_path, server_name)) + print( + ( + "A config file has been generated in %r for server name" + " %r with corresponding SSL keys and self-signed" + " certificates. Please review this file and customise it" + " to your needs." + ) + % (config_path, server_name) + ) print( "If this server name is incorrect, you will need to" " regenerate the SSL certificates" ) return else: - print(( - "Config file %r already exists. Generating any missing key" - " files." - ) % (config_path,)) + print( + ( + "Config file %r already exists. Generating any missing key" + " files." + ) + % (config_path,) + ) generate_keys = True parser = argparse.ArgumentParser( @@ -338,8 +335,7 @@ class Config(object): return obj - def read_config_files(self, config_files, keys_directory=None, - generate_keys=False): + def read_config_files(self, config_files, keys_directory=None, generate_keys=False): if not keys_directory: keys_directory = os.path.dirname(config_files[-1]) @@ -364,8 +360,9 @@ class Config(object): if "report_stats" not in config: raise ConfigError( - MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" + - MISSING_REPORT_STATS_SPIEL + MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + + "\n" + + MISSING_REPORT_STATS_SPIEL ) if generate_keys: @@ -399,16 +396,16 @@ def find_config_files(search_paths): for entry in os.listdir(config_path): entry_path = os.path.join(config_path, entry) if not os.path.isfile(entry_path): - print ( - "Found subdirectory in config directory: %r. IGNORING." - ) % (entry_path, ) + err = "Found subdirectory in config directory: %r. IGNORING." + print(err % (entry_path,)) continue if not entry.endswith(".yaml"): - print ( - "Found file in config directory that does not" - " end in '.yaml': %r. IGNORING." - ) % (entry_path, ) + err = ( + "Found file in config directory that does not end in " + "'.yaml': %r. IGNORING." 
+ ) + print(err % (entry_path,)) continue files.append(entry_path) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index be61147b9b..d9d0255d0b 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -18,7 +18,7 @@ import threading import time from six import PY2, iteritems, iterkeys, itervalues -from six.moves import intern, range +from six.moves import builtins, intern, range from canonicaljson import json from prometheus_client import Histogram @@ -1233,7 +1233,7 @@ def db_to_json(db_content): # psycopg2 on Python 2 returns buffer objects, which we need to cast to # bytes to decode - if PY2 and isinstance(db_content, buffer): + if PY2 and isinstance(db_content, builtins.buffer): db_content = bytes(db_content) # Decode it to a Unicode string before feeding it to json.loads, so we diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index a1331c1a61..8af17921e3 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -32,7 +32,7 @@ logger = logging.getLogger(__name__) # py2 sqlite has buffer hardcoded as only binary type, so we must use it, # despite being deprecated and removed in favor of memoryview if six.PY2: - db_binary_type = buffer + db_binary_type = six.moves.builtins.buffer else: db_binary_type = memoryview diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index c7987bfcdd..2743b52bad 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -29,7 +29,7 @@ from ._base import SQLBaseStore logger = logging.getLogger(__name__) if six.PY2: - db_binary_type = buffer + db_binary_type = six.moves.builtins.buffer else: db_binary_type = memoryview diff --git a/synapse/storage/signatures.py b/synapse/storage/signatures.py index 5623391f6e..158e9dbe7b 100644 --- a/synapse/storage/signatures.py +++ b/synapse/storage/signatures.py @@ -27,7 +27,7 @@ from ._base import SQLBaseStore # py2 sqlite has buffer hardcoded as only binary type, so we must use it, # despite being deprecated and removed in favor of memoryview if six.PY2: - db_binary_type = buffer + db_binary_type = six.moves.builtins.buffer else: db_binary_type = memoryview diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index a3032cdce9..d8bf953ec0 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -30,7 +30,7 @@ from ._base import SQLBaseStore, db_to_json # py2 sqlite has buffer hardcoded as only binary type, so we must use it, # despite being deprecated and removed in favor of memoryview if six.PY2: - db_binary_type = buffer + db_binary_type = six.moves.builtins.buffer else: db_binary_type = memoryview diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index f2bde74dc5..625aedc940 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -15,6 +15,8 @@ import logging +from six import integer_types + from sortedcontainers import SortedDict from synapse.util import caches @@ -47,7 +49,7 @@ class StreamChangeCache(object): def has_entity_changed(self, entity, stream_pos): """Returns True if the entity may have been updated since stream_pos """ - assert type(stream_pos) is int or type(stream_pos) is long + assert type(stream_pos) in integer_types if stream_pos < self._earliest_known_stream_pos: self.metrics.inc_misses() diff --git a/synctl b/synctl index bb8cb084cc..7e79b05c39 100755 --- a/synctl +++ b/synctl @@ -76,8 +76,7 @@ def start(configfile): try: subprocess.check_call(args) - 
write("started synapse.app.homeserver(%r)" % - (configfile,), colour=GREEN) + write("started synapse.app.homeserver(%r)" % (configfile,), colour=GREEN) except subprocess.CalledProcessError as e: write( "error starting (exit code: %d); see above for logs" % e.returncode, @@ -86,21 +85,15 @@ def start(configfile): def start_worker(app, configfile, worker_configfile): - args = [ - sys.executable, "-B", - "-m", app, - "-c", configfile, - "-c", worker_configfile - ] + args = [sys.executable, "-B", "-m", app, "-c", configfile, "-c", worker_configfile] try: subprocess.check_call(args) write("started %s(%r)" % (app, worker_configfile), colour=GREEN) except subprocess.CalledProcessError as e: write( - "error starting %s(%r) (exit code: %d); see above for logs" % ( - app, worker_configfile, e.returncode, - ), + "error starting %s(%r) (exit code: %d); see above for logs" + % (app, worker_configfile, e.returncode), colour=RED, ) @@ -120,9 +113,9 @@ def stop(pidfile, app): abort("Cannot stop %s: Unknown error" % (app,)) -Worker = collections.namedtuple("Worker", [ - "app", "configfile", "pidfile", "cache_factor", "cache_factors", -]) +Worker = collections.namedtuple( + "Worker", ["app", "configfile", "pidfile", "cache_factor", "cache_factors"] +) def main(): @@ -141,24 +134,20 @@ def main(): help="the homeserver config file, defaults to homeserver.yaml", ) parser.add_argument( - "-w", "--worker", - metavar="WORKERCONFIG", - help="start or stop a single worker", + "-w", "--worker", metavar="WORKERCONFIG", help="start or stop a single worker" ) parser.add_argument( - "-a", "--all-processes", + "-a", + "--all-processes", metavar="WORKERCONFIGDIR", help="start or stop all the workers in the given directory" - " and the main synapse process", + " and the main synapse process", ) options = parser.parse_args() if options.worker and options.all_processes: - write( - 'Cannot use "--worker" with "--all-processes"', - stream=sys.stderr - ) + write('Cannot use "--worker" with "--all-processes"', stream=sys.stderr) sys.exit(1) configfile = options.configfile @@ -167,9 +156,7 @@ def main(): write( "No config file found\n" "To generate a config file, run '%s -c %s --generate-config" - " --server-name='\n" % ( - " ".join(SYNAPSE), options.configfile - ), + " --server-name='\n" % (" ".join(SYNAPSE), options.configfile), stream=sys.stderr, ) sys.exit(1) @@ -194,8 +181,7 @@ def main(): worker_configfile = options.worker if not os.path.exists(worker_configfile): write( - "No worker config found at %r" % (worker_configfile,), - stream=sys.stderr, + "No worker config found at %r" % (worker_configfile,), stream=sys.stderr ) sys.exit(1) worker_configfiles.append(worker_configfile) @@ -211,9 +197,9 @@ def main(): stream=sys.stderr, ) sys.exit(1) - worker_configfiles.extend(sorted(glob.glob( - os.path.join(worker_configdir, "*.yaml") - ))) + worker_configfiles.extend( + sorted(glob.glob(os.path.join(worker_configdir, "*.yaml"))) + ) workers = [] for worker_configfile in worker_configfiles: @@ -223,14 +209,12 @@ def main(): if worker_app == "synapse.app.homeserver": # We need to special case all of this to pick up options that may # be set in the main config file or in this worker config file. 
- worker_pidfile = ( - worker_config.get("pid_file") - or pidfile + worker_pidfile = worker_config.get("pid_file") or pidfile + worker_cache_factor = ( + worker_config.get("synctl_cache_factor") or cache_factor ) - worker_cache_factor = worker_config.get("synctl_cache_factor") or cache_factor worker_cache_factors = ( - worker_config.get("synctl_cache_factors") - or cache_factors + worker_config.get("synctl_cache_factors") or cache_factors ) daemonize = worker_config.get("daemonize") or config.get("daemonize") assert daemonize, "Main process must have daemonize set to true" @@ -239,19 +223,27 @@ def main(): for key in worker_config: if key == "worker_app": # But we allow worker_app continue - assert not key.startswith("worker_"), \ - "Main process cannot use worker_* config" + assert not key.startswith( + "worker_" + ), "Main process cannot use worker_* config" else: worker_pidfile = worker_config["worker_pid_file"] worker_daemonize = worker_config["worker_daemonize"] assert worker_daemonize, "In config %r: expected '%s' to be True" % ( - worker_configfile, "worker_daemonize") + worker_configfile, + "worker_daemonize", + ) worker_cache_factor = worker_config.get("synctl_cache_factor") worker_cache_factors = worker_config.get("synctl_cache_factors", {}) - workers.append(Worker( - worker_app, worker_configfile, worker_pidfile, worker_cache_factor, - worker_cache_factors, - )) + workers.append( + Worker( + worker_app, + worker_configfile, + worker_pidfile, + worker_cache_factor, + worker_cache_factors, + ) + ) action = options.action diff --git a/tox.ini b/tox.ini index 87b5e4782d..04d2f721bf 100644 --- a/tox.ini +++ b/tox.ini @@ -108,10 +108,10 @@ commands = [testenv:pep8] skip_install = True -basepython = python2.7 +basepython = python3.6 deps = flake8 -commands = /bin/sh -c "flake8 synapse tests {env:PEP8SUFFIX:}" +commands = /bin/sh -c "flake8 synapse tests scripts scripts-dev scripts/register_new_matrix_user scripts/synapse_port_db synctl {env:PEP8SUFFIX:}" [testenv:check_isort] skip_install = True -- cgit 1.5.1 From a6f421e8120c74fd9948e6958d794b6455e2ac85 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Sat, 20 Oct 2018 02:13:01 +0100 Subject: Run MAU queries as background processes Fixes #3820 --- changelog.d/4076.misc | 1 + synapse/app/homeserver.py | 30 ++++++++++++++++++++++-------- 2 files changed, 23 insertions(+), 8 deletions(-) create mode 100644 changelog.d/4076.misc (limited to 'synapse') diff --git a/changelog.d/4076.misc b/changelog.d/4076.misc new file mode 100644 index 0000000000..9dd000decf --- /dev/null +++ b/changelog.d/4076.misc @@ -0,0 +1 @@ +Correctly manage logcontexts during startup to fix some "Unexpected logging context" warnings \ No newline at end of file diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index e3f0d99a3f..73ab267e8a 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -530,10 +530,13 @@ def run(hs): clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000) # monthly active user limiting functionality - clock.looping_call( - hs.get_datastore().reap_monthly_active_users, 1000 * 60 * 60 - ) - hs.get_datastore().reap_monthly_active_users() + def reap_monthly_active_users(): + return run_as_background_process( + "reap_monthly_active_users", + hs.get_datastore().reap_monthly_active_users, + ) + clock.looping_call(reap_monthly_active_users, 1000 * 60 * 60) + reap_monthly_active_users() @defer.inlineCallbacks def generate_monthly_active_users(): @@ -547,12 +550,23 @@ def run(hs): 
registered_reserved_users_mau_gauge.set(float(reserved_count)) max_mau_gauge.set(float(hs.config.max_mau_value)) - hs.get_datastore().initialise_reserved_users( - hs.config.mau_limits_reserved_threepids + def start_generate_monthly_active_users(): + return run_as_background_process( + "generate_monthly_active_users", + generate_monthly_active_users, + ) + + # XXX is this really supposed to be a background process? it looks + # like it needs to complete before some of the other stuff runs. + run_as_background_process( + "initialise_reserved_users", + hs.get_datastore().initialise_reserved_users, + hs.config.mau_limits_reserved_threepids, ) - generate_monthly_active_users() + + start_generate_monthly_active_users() if hs.config.limit_usage_by_mau: - clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000) + clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000) # End of monthly active user settings if hs.config.report_stats: -- cgit 1.5.1 From 3e8b02c9393684f2c96e221ced804a0d20de2da0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 22 Oct 2018 16:12:11 +0100 Subject: Rename _refresh_pusher This is public (or at least, called from outside the class), so ought to have a better name. --- synapse/app/pusher.py | 2 +- synapse/push/pusherpool.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 0f9f8e19f6..e06b70894e 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -183,7 +183,7 @@ class PusherReplicationHandler(ReplicationClientHandler): def start_pusher(self, user_id, app_id, pushkey): key = "%s:%s" % (app_id, pushkey) logger.info("Starting pusher %r / %r", user_id, key) - return self.pusher_pool._refresh_pusher(app_id, pushkey, user_id) + return self.pusher_pool.start_pusher_by_id(app_id, pushkey, user_id) def start(config_options): diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 9f7d5ef217..bdfe27d8c1 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -86,7 +86,7 @@ class PusherPool: last_stream_ordering=last_stream_ordering, profile_tag=profile_tag, ) - yield self._refresh_pusher(app_id, pushkey, user_id) + yield self.start_pusher_by_id(app_id, pushkey, user_id) @defer.inlineCallbacks def remove_pushers_by_app_id_and_pushkey_not_user(self, app_id, pushkey, @@ -190,7 +190,8 @@ class PusherPool: logger.exception("Exception in pusher on_new_receipts") @defer.inlineCallbacks - def _refresh_pusher(self, app_id, pushkey, user_id): + def start_pusher_by_id(self, app_id, pushkey, user_id): + """Look up the details for the given pusher, and start it""" resultlist = yield self.store.get_pushers_by_app_id_and_pushkey( app_id, pushkey ) -- cgit 1.5.1 From 04277d0ed8ba456aa6c2d5dca978013c295e7ace Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 22 Oct 2018 16:12:11 +0100 Subject: Factor PusherPool._start_pusher out of _start_pushers ... and use it from start_pusher_by_id. This mostly simplifies start_pusher_by_id. 
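
Reduced to a toy sketch, the factoring looks like this (the class, print stand-in, and in-memory row list are invented for illustration; the real helper also catches pusher-creation failures and stops any existing pusher registered under the same app_id/pushkey, as the diff below shows):

    class PusherPoolSketch:
        # Both entry points now funnel into a single _start_pusher helper.
        # Rows are plain dicts, as they are when loaded from the pushers table.
        def __init__(self, rows):
            self.rows = rows

        def start_pusher_by_id(self, app_id, pushkey, user_id):
            for r in self.rows:
                if (r["app_id"], r["pushkey"], r["user_name"]) == (
                    app_id, pushkey, user_id
                ):
                    self._start_pusher(r)  # one pusher: delegate to the helper

        def _start_pushers(self):
            for r in self.rows:
                self._start_pusher(r)      # all pushers: same helper in a loop

        def _start_pusher(self, pusherdict):
            print("starting %(app_id)s:%(pushkey)s for %(user_name)s" % pusherdict)

    pool = PusherPoolSketch(
        [{"app_id": "m.http", "pushkey": "k1", "user_name": "@alice:example.com"}]
    )
    pool.start_pusher_by_id("m.http", "k1", "@alice:example.com")
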
--- synapse/push/pusherpool.py | 51 ++++++++++++++++++++++++++++++---------------- 1 file changed, 33 insertions(+), 18 deletions(-) (limited to 'synapse') diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index bdfe27d8c1..a5c133aa33 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -192,6 +192,9 @@ class PusherPool: @defer.inlineCallbacks def start_pusher_by_id(self, app_id, pushkey, user_id): """Look up the details for the given pusher, and start it""" + if not self._start_pushers: + return + resultlist = yield self.store.get_pushers_by_app_id_and_pushkey( app_id, pushkey ) @@ -202,8 +205,7 @@ class PusherPool: p = r if p: - - self._start_pushers([p]) + self._start_pusher(p) def _start_pushers(self, pushers): if not self.start_pushers: @@ -211,24 +213,37 @@ class PusherPool: return logger.info("Starting %d pushers", len(pushers)) for pusherdict in pushers: - try: - p = self.pusher_factory.create_pusher(pusherdict) - except Exception: - logger.exception("Couldn't start a pusher: caught Exception") - continue - if p: - appid_pushkey = "%s:%s" % ( - pusherdict['app_id'], - pusherdict['pushkey'], - ) - byuser = self.pushers.setdefault(pusherdict['user_name'], {}) + self._start_pusher(pusherdict) + logger.info("Started pushers") - if appid_pushkey in byuser: - byuser[appid_pushkey].on_stop() - byuser[appid_pushkey] = p - run_in_background(p.on_started) + def _start_pusher(self, pusherdict): + """Start the given pusher - logger.info("Started pushers") + Args: + pusherdict (dict): + + Returns: + None + """ + try: + p = self.pusher_factory.create_pusher(pusherdict) + except Exception: + logger.exception("Couldn't start a pusher: caught Exception") + return + + if not p: + return + + appid_pushkey = "%s:%s" % ( + pusherdict['app_id'], + pusherdict['pushkey'], + ) + byuser = self.pushers.setdefault(pusherdict['user_name'], {}) + + if appid_pushkey in byuser: + byuser[appid_pushkey].on_stop() + byuser[appid_pushkey] = p + run_in_background(p.on_started) @defer.inlineCallbacks def remove_pusher(self, app_id, pushkey, user_id): -- cgit 1.5.1 From 5110f4e4258564615edfca6112ab6a36eb551672 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 22 Oct 2018 16:12:11 +0100 Subject: move get_all_pushers call down simplifies the interface to _start_pushers --- synapse/push/pusherpool.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index a5c133aa33..b9b68ec829 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -36,8 +36,7 @@ class PusherPool: @defer.inlineCallbacks def start(self): - pushers = yield self.store.get_all_pushers() - self._start_pushers(pushers) + yield self._start_pushers() @defer.inlineCallbacks def add_pusher(self, user_id, access_token, kind, app_id, @@ -207,10 +206,17 @@ class PusherPool: if p: self._start_pusher(p) - def _start_pushers(self, pushers): + @defer.inlineCallbacks + def _start_pushers(self): + """Start all the pushers + + Returns: + Deferred + """ if not self.start_pushers: logger.info("Not starting pushers because they are disabled in the config") return + pushers = yield self.store.get_all_pushers() logger.info("Starting %d pushers", len(pushers)) for pusherdict in pushers: self._start_pusher(pusherdict) -- cgit 1.5.1 From c7273c11bc73630e3f2ab1a82d20a40d8b17f9a3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 22 Oct 2018 16:12:11 +0100 Subject: Give pushers their own 
background logcontext Each pusher has its own loop which runs for as long as it has work to do. This should run in its own background thread with its own logcontext, as other similar loops elsewhere in the system do - which means that CPU usage is consistently attributed to that loop, rather than to whatever request happened to start the loop. --- synapse/push/emailpusher.py | 48 +++++++++++++++++----------------- synapse/push/httppusher.py | 64 +++++++++++++++++++++------------------------ 2 files changed, 54 insertions(+), 58 deletions(-) (limited to 'synapse') diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index d746371420..0c9c0201e8 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -18,8 +18,7 @@ import logging from twisted.internet import defer from twisted.internet.error import AlreadyCalled, AlreadyCancelled -from synapse.util.logcontext import LoggingContext -from synapse.util.metrics import Measure +from synapse.metrics.background_process_metrics import run_as_background_process logger = logging.getLogger(__name__) @@ -80,7 +79,7 @@ class EmailPusher(object): self.throttle_params = yield self.store.get_throttle_params_by_room( self.pusher_id ) - yield self._process() + self._start_processing() except Exception: logger.exception("Error starting email pusher") @@ -92,10 +91,10 @@ class EmailPusher(object): pass self.timed_call = None - @defer.inlineCallbacks def on_new_notifications(self, min_stream_ordering, max_stream_ordering): self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering) - yield self._process() + self._start_processing() + return defer.succeed(None) def on_new_receipts(self, min_stream_id, max_stream_id): # We could wake up and cancel the timer but there tend to be quite a @@ -103,32 +102,33 @@ class EmailPusher(object): # timer fire return defer.succeed(None) - @defer.inlineCallbacks def on_timer(self): self.timed_call = None - yield self._process() + self._start_processing() - @defer.inlineCallbacks - def _process(self): + def _start_processing(self): if self.processing: return - with LoggingContext("emailpush._process"): - with Measure(self.clock, "emailpush._process"): + run_as_background_process("emailpush.process", self._process) + + @defer.inlineCallbacks + def _process(self): + try: + self.processing = True + + # if the max ordering changes while we're running _unsafe_process, + # call it again, and so on until we've caught up. + while True: + starting_max_ordering = self.max_stream_ordering try: - self.processing = True - # if the max ordering changes while we're running _unsafe_process, - # call it again, and so on until we've caught up. 
- while True: - starting_max_ordering = self.max_stream_ordering - try: - yield self._unsafe_process() - except Exception: - logger.exception("Exception processing notifs") - if self.max_stream_ordering == starting_max_ordering: - break - finally: - self.processing = False + yield self._unsafe_process() + except Exception: + logger.exception("Exception processing notifs") + if self.max_stream_ordering == starting_max_ordering: + break + finally: + self.processing = False @defer.inlineCallbacks def _unsafe_process(self): diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 48abd5e4d6..5f6b21bc67 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -22,9 +22,8 @@ from prometheus_client import Counter from twisted.internet import defer from twisted.internet.error import AlreadyCalled, AlreadyCancelled +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.push import PusherConfigException -from synapse.util.logcontext import LoggingContext -from synapse.util.metrics import Measure from . import push_rule_evaluator, push_tools @@ -92,34 +91,30 @@ class HttpPusher(object): self.data_minus_url.update(self.data) del self.data_minus_url['url'] - @defer.inlineCallbacks def on_started(self): - try: - yield self._process() - except Exception: - logger.exception("Error starting http pusher") + self._start_processing() + return defer.succeed(None) - @defer.inlineCallbacks def on_new_notifications(self, min_stream_ordering, max_stream_ordering): self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering or 0) - yield self._process() + self._start_processing() + return defer.suceed(None) - @defer.inlineCallbacks def on_new_receipts(self, min_stream_id, max_stream_id): # Note that the min here shouldn't be relied upon to be accurate. # We could check the receipts are actually m.read receipts here, # but currently that's the only type of receipt anyway... - with LoggingContext("push.on_new_receipts"): - with Measure(self.clock, "push.on_new_receipts"): - badge = yield push_tools.get_badge_count( - self.hs.get_datastore(), self.user_id - ) - yield self._send_badge(badge) + run_as_background_process("http_pusher.on_new_receipts", self._update_badge) + return defer.succeed(None) @defer.inlineCallbacks + def _update_badge(self): + badge = yield push_tools.get_badge_count(self.hs.get_datastore(), self.user_id) + yield self._send_badge(badge) + def on_timer(self): - yield self._process() + self._start_processing() def on_stop(self): if self.timed_call: @@ -129,27 +124,28 @@ class HttpPusher(object): pass self.timed_call = None - @defer.inlineCallbacks - def _process(self): + def _start_processing(self): if self.processing: return - with LoggingContext("push._process"): - with Measure(self.clock, "push._process"): + run_as_background_process("httppush.process", self._process) + + @defer.inlineCallbacks + def _process(self): + try: + self.processing = True + # if the max ordering changes while we're running _unsafe_process, + # call it again, and so on until we've caught up. + while True: + starting_max_ordering = self.max_stream_ordering try: - self.processing = True - # if the max ordering changes while we're running _unsafe_process, - # call it again, and so on until we've caught up. 
- while True: - starting_max_ordering = self.max_stream_ordering - try: - yield self._unsafe_process() - except Exception: - logger.exception("Exception processing notifs") - if self.max_stream_ordering == starting_max_ordering: - break - finally: - self.processing = False + yield self._unsafe_process() + except Exception: + logger.exception("Exception processing notifs") + if self.max_stream_ordering == starting_max_ordering: + break + finally: + self.processing = False @defer.inlineCallbacks def _unsafe_process(self): -- cgit 1.5.1 From e7a16c6210191e9556fb1c11cbff2d98f0a5206c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 22 Oct 2018 16:12:11 +0100 Subject: Remove redundant run_as_background_process() from pusherpool `on_new_notifications` and `on_new_receipts` in `HttpPusher` and `EmailPusher` now always return synchronously, so we can remove the `defer.gatherResults` on their results, and the `run_as_background_process` wrappers can be removed too because the PusherPool methods will now complete quickly enough. --- synapse/app/pusher.py | 4 ++-- synapse/handlers/federation.py | 4 ++-- synapse/handlers/message.py | 2 +- synapse/handlers/receipts.py | 2 +- synapse/push/emailpusher.py | 3 +-- synapse/push/httppusher.py | 2 -- synapse/push/pusherpool.py | 47 +++++++----------------------------------- 7 files changed, 14 insertions(+), 50 deletions(-) (limited to 'synapse') diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index e06b70894e..83b0863f00 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -161,11 +161,11 @@ class PusherReplicationHandler(ReplicationClientHandler): else: yield self.start_pusher(row.user_id, row.app_id, row.pushkey) elif stream_name == "events": - self.pusher_pool.on_new_notifications( + yield self.pusher_pool.on_new_notifications( token, token, ) elif stream_name == "receipts": - self.pusher_pool.on_new_receipts( + yield self.pusher_pool.on_new_receipts( token, token, set(row.room_id for row in rows) ) except Exception: diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index cab57a8849..63e495e3f8 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2520,7 +2520,7 @@ class FederationHandler(BaseHandler): if not backfilled: # Never notify for backfilled events for event, _ in event_and_contexts: - self._notify_persisted_event(event, max_stream_id) + yield self._notify_persisted_event(event, max_stream_id) def _notify_persisted_event(self, event, max_stream_id): """Checks to see if notifier/pushers should be notified about the @@ -2553,7 +2553,7 @@ class FederationHandler(BaseHandler): extra_users=extra_users ) - self.pusher_pool.on_new_notifications( + return self.pusher_pool.on_new_notifications( event_stream_id, max_stream_id, ) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 4954b23a0d..6c4fcfb10a 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -779,7 +779,7 @@ class EventCreationHandler(object): event, context=context ) - self.pusher_pool.on_new_notifications( + yield self.pusher_pool.on_new_notifications( event_stream_id, max_stream_id, ) diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index a6f3181f09..4c2690ba26 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -119,7 +119,7 @@ class ReceiptsHandler(BaseHandler): "receipt_key", max_batch_id, rooms=affected_room_ids ) # Note that the min here shouldn't be relied upon to be accurate. 
- self.hs.get_pusherpool().on_new_receipts( + yield self.hs.get_pusherpool().on_new_receipts( min_batch_id, max_batch_id, affected_room_ids, ) diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index 0c9c0201e8..d5a99b838c 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -94,13 +94,12 @@ class EmailPusher(object): def on_new_notifications(self, min_stream_ordering, max_stream_ordering): self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering) self._start_processing() - return defer.succeed(None) def on_new_receipts(self, min_stream_id, max_stream_id): # We could wake up and cancel the timer but there tend to be quite a # lot of read receipts so it's probably less work to just let the # timer fire - return defer.succeed(None) + pass def on_timer(self): self.timed_call = None diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 5f6b21bc67..770f55feae 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -98,7 +98,6 @@ class HttpPusher(object): def on_new_notifications(self, min_stream_ordering, max_stream_ordering): self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering or 0) self._start_processing() - return defer.suceed(None) def on_new_receipts(self, min_stream_id, max_stream_id): # Note that the min here shouldn't be relied upon to be accurate. @@ -106,7 +105,6 @@ class HttpPusher(object): # We could check the receipts are actually m.read receipts here, # but currently that's the only type of receipt anyway... run_as_background_process("http_pusher.on_new_receipts", self._update_badge) - return defer.succeed(None) @defer.inlineCallbacks def _update_badge(self): diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index b9b68ec829..a4d1ce3aad 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -18,9 +18,8 @@ import logging from twisted.internet import defer -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.push.pusher import PusherFactory -from synapse.util.logcontext import make_deferred_yieldable, run_in_background +from synapse.util.logcontext import run_in_background logger = logging.getLogger(__name__) @@ -122,45 +121,23 @@ class PusherPool: p['app_id'], p['pushkey'], p['user_name'], ) - def on_new_notifications(self, min_stream_id, max_stream_id): - run_as_background_process( - "on_new_notifications", - self._on_new_notifications, min_stream_id, max_stream_id, - ) - @defer.inlineCallbacks - def _on_new_notifications(self, min_stream_id, max_stream_id): + def on_new_notifications(self, min_stream_id, max_stream_id): try: users_affected = yield self.store.get_push_action_users_in_range( min_stream_id, max_stream_id ) - deferreds = [] - for u in users_affected: if u in self.pushers: for p in self.pushers[u].values(): - deferreds.append( - run_in_background( - p.on_new_notifications, - min_stream_id, max_stream_id, - ) - ) - - yield make_deferred_yieldable( - defer.gatherResults(deferreds, consumeErrors=True), - ) + p.on_new_notifications(min_stream_id, max_stream_id) + except Exception: logger.exception("Exception in pusher on_new_notifications") - def on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids): - run_as_background_process( - "on_new_receipts", - self._on_new_receipts, min_stream_id, max_stream_id, affected_room_ids, - ) - @defer.inlineCallbacks - def _on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids): + def 
on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids): try: # Need to subtract 1 from the minimum because the lower bound here # is not inclusive @@ -170,21 +147,11 @@ class PusherPool: # This returns a tuple, user_id is at index 3 users_affected = set([r[3] for r in updated_receipts]) - deferreds = [] - for u in users_affected: if u in self.pushers: for p in self.pushers[u].values(): - deferreds.append( - run_in_background( - p.on_new_receipts, - min_stream_id, max_stream_id, - ) - ) - - yield make_deferred_yieldable( - defer.gatherResults(deferreds, consumeErrors=True), - ) + p.on_new_receipts(min_stream_id, max_stream_id) + except Exception: logger.exception("Exception in pusher on_new_receipts") -- cgit 1.5.1 From f749607c911a82e64685f1cfaeee892c49a1b606 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 22 Oct 2018 16:12:11 +0100 Subject: Make on_started synchronous too This brings it into line with on_new_notifications and on_new_receipts. It requires a little bit of hoop-jumping in EmailPusher to load the throttle params before the first loop. --- synapse/push/emailpusher.py | 15 +++++++-------- synapse/push/httppusher.py | 1 - synapse/push/pusherpool.py | 16 ++++++++++++++-- 3 files changed, 21 insertions(+), 11 deletions(-) (limited to 'synapse') diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index d5a99b838c..0bc548ae7a 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -72,16 +72,9 @@ class EmailPusher(object): self.processing = False - @defer.inlineCallbacks def on_started(self): if self.mailer is not None: - try: - self.throttle_params = yield self.store.get_throttle_params_by_room( - self.pusher_id - ) - self._start_processing() - except Exception: - logger.exception("Error starting email pusher") + self._start_processing() def on_stop(self): if self.timed_call: @@ -116,6 +109,12 @@ class EmailPusher(object): try: self.processing = True + if self.throttle_params is None: + # this is our first loop: load up the throttle params + self.throttle_params = yield self.store.get_throttle_params_by_room( + self.pusher_id + ) + # if the max ordering changes while we're running _unsafe_process, # call it again, and so on until we've caught up. while True: diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 770f55feae..33034d44da 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -93,7 +93,6 @@ class HttpPusher(object): def on_started(self): self._start_processing() - return defer.succeed(None) def on_new_notifications(self, min_stream_ordering, max_stream_ordering): self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering or 0) diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index a4d1ce3aad..695e582dce 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -19,12 +19,24 @@ import logging from twisted.internet import defer from synapse.push.pusher import PusherFactory -from synapse.util.logcontext import run_in_background logger = logging.getLogger(__name__) class PusherPool: + """ + The pusher pool. This is responsible for dispatching notifications of new events to + the http and email pushers. + + It provides three methods which are designed to be called by the rest of the + application: `start`, `on_new_notifications`, and `on_new_receipts`: each of these + delegates to each of the relevant pushers. 
+ + Note that it is expected that each pusher will have its own 'processing' loop which + will send out the notifications in the background, rather than blocking until the + notifications are sent; accordingly Pusher.on_started, Pusher.on_new_notifications and + Pusher.on_new_receipts are not expected to return deferreds. + """ def __init__(self, _hs): self.hs = _hs self.pusher_factory = PusherFactory(_hs) @@ -216,7 +228,7 @@ class PusherPool: if appid_pushkey in byuser: byuser[appid_pushkey].on_stop() byuser[appid_pushkey] = p - run_in_background(p.on_started) + p.on_started() @defer.inlineCallbacks def remove_pusher(self, app_id, pushkey, user_id): -- cgit 1.5.1 From 026cd91ac878bc723f653f393fd1f44c438ba559 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 22 Oct 2018 16:12:11 +0100 Subject: Run PusherPool.start as a background process We don't do anything with the result, so this is needed to give this code a logcontext. --- synapse/push/pusherpool.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'synapse') diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 695e582dce..d25877cc66 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -18,6 +18,7 @@ import logging from twisted.internet import defer +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.push.pusher import PusherFactory logger = logging.getLogger(__name__) @@ -45,9 +46,13 @@ class PusherPool: self.clock = self.hs.get_clock() self.pushers = {} - @defer.inlineCallbacks def start(self): - yield self._start_pushers() + """Starts the pushers off in a background process. + """ + if not self.start_pushers: + logger.info("Not starting pushers because they are disabled in the config") + return + run_as_background_process("start_pushers", self._start_pushers) @defer.inlineCallbacks def add_pusher(self, user_id, access_token, kind, app_id, @@ -192,9 +197,6 @@ class PusherPool: Returns: Deferred """ - if not self.start_pushers: - logger.info("Not starting pushers because they are disabled in the config") - return pushers = yield self.store.get_all_pushers() logger.info("Starting %d pushers", len(pushers)) for pusherdict in pushers: -- cgit 1.5.1 From 5c445114d356c9c23b99f0a2c4246be983a38b69 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 23 Oct 2018 13:12:32 +0100 Subject: Correctly account for cpu usage by background threads (#4074) Wrap calls to deferToThread() in a thing which uses a child logcontext to attribute CPU usage to the right request. While we're in the area, remove the logcontext_tracer stuff, which is never used, and afaik doesn't work. 
Fixes #4064 --- changelog.d/4074.bugfix | 1 + synapse/handlers/auth.py | 18 +---- synapse/rest/media/v1/media_repository.py | 24 +++--- synapse/rest/media/v1/media_storage.py | 8 +- synapse/rest/media/v1/storage_provider.py | 6 +- synapse/util/logcontext.py | 120 +++++++++++++++++------------- 6 files changed, 97 insertions(+), 80 deletions(-) create mode 100644 changelog.d/4074.bugfix (limited to 'synapse') diff --git a/changelog.d/4074.bugfix b/changelog.d/4074.bugfix new file mode 100644 index 0000000000..b3b6b00243 --- /dev/null +++ b/changelog.d/4074.bugfix @@ -0,0 +1 @@ +Correctly account for cpu usage by background threads diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 2a5eab124f..329e3c7d71 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -22,7 +22,7 @@ import bcrypt import pymacaroons from canonicaljson import json -from twisted.internet import defer, threads +from twisted.internet import defer from twisted.web.client import PartialDownloadError import synapse.util.stringutils as stringutils @@ -37,8 +37,8 @@ from synapse.api.errors import ( ) from synapse.module_api import ModuleApi from synapse.types import UserID +from synapse.util import logcontext from synapse.util.caches.expiringcache import ExpiringCache -from synapse.util.logcontext import make_deferred_yieldable from ._base import BaseHandler @@ -884,11 +884,7 @@ class AuthHandler(BaseHandler): bcrypt.gensalt(self.bcrypt_rounds), ).decode('ascii') - return make_deferred_yieldable( - threads.deferToThreadPool( - self.hs.get_reactor(), self.hs.get_reactor().getThreadPool(), _do_hash - ), - ) + return logcontext.defer_to_thread(self.hs.get_reactor(), _do_hash) def validate_hash(self, password, stored_hash): """Validates that self.hash(password) == stored_hash. 
@@ -913,13 +909,7 @@ class AuthHandler(BaseHandler): if not isinstance(stored_hash, bytes): stored_hash = stored_hash.encode('ascii') - return make_deferred_yieldable( - threads.deferToThreadPool( - self.hs.get_reactor(), - self.hs.get_reactor().getThreadPool(), - _do_validate_hash, - ), - ) + return logcontext.defer_to_thread(self.hs.get_reactor(), _do_validate_hash) else: return defer.succeed(False) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index a828ff4438..08b1867fab 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -25,7 +25,7 @@ from six.moves.urllib import parse as urlparse import twisted.internet.error import twisted.web.http -from twisted.internet import defer, threads +from twisted.internet import defer from twisted.web.resource import Resource from synapse.api.errors import ( @@ -36,8 +36,8 @@ from synapse.api.errors import ( ) from synapse.http.matrixfederationclient import MatrixFederationHttpClient from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.util import logcontext from synapse.util.async_helpers import Linearizer -from synapse.util.logcontext import make_deferred_yieldable from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import is_ascii, random_string @@ -492,10 +492,11 @@ class MediaRepository(object): )) thumbnailer = Thumbnailer(input_path) - t_byte_source = yield make_deferred_yieldable(threads.deferToThread( + t_byte_source = yield logcontext.defer_to_thread( + self.hs.get_reactor(), self._generate_thumbnail, thumbnailer, t_width, t_height, t_method, t_type - )) + ) if t_byte_source: try: @@ -534,10 +535,11 @@ class MediaRepository(object): )) thumbnailer = Thumbnailer(input_path) - t_byte_source = yield make_deferred_yieldable(threads.deferToThread( + t_byte_source = yield logcontext.defer_to_thread( + self.hs.get_reactor(), self._generate_thumbnail, thumbnailer, t_width, t_height, t_method, t_type - )) + ) if t_byte_source: try: @@ -620,15 +622,17 @@ class MediaRepository(object): for (t_width, t_height, t_type), t_method in iteritems(thumbnails): # Generate the thumbnail if t_method == "crop": - t_byte_source = yield make_deferred_yieldable(threads.deferToThread( + t_byte_source = yield logcontext.defer_to_thread( + self.hs.get_reactor(), thumbnailer.crop, t_width, t_height, t_type, - )) + ) elif t_method == "scale": - t_byte_source = yield make_deferred_yieldable(threads.deferToThread( + t_byte_source = yield logcontext.defer_to_thread( + self.hs.get_reactor(), thumbnailer.scale, t_width, t_height, t_type, - )) + ) else: logger.error("Unrecognized method: %r", t_method) continue diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index a6189224ee..896078fe76 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -21,9 +21,10 @@ import sys import six -from twisted.internet import defer, threads +from twisted.internet import defer from twisted.protocols.basic import FileSender +from synapse.util import logcontext from synapse.util.file_consumer import BackgroundFileConsumer from synapse.util.logcontext import make_deferred_yieldable @@ -64,9 +65,10 @@ class MediaStorage(object): with self.store_into_file(file_info) as (f, fname, finish_cb): # Write to the main repository - yield make_deferred_yieldable(threads.deferToThread( + yield logcontext.defer_to_thread( + self.hs.get_reactor(), 
_write_file_synchronously, source, f, - )) + ) yield finish_cb() defer.returnValue(fname) diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py index 7b9f8b4d79..5aa03031f6 100644 --- a/synapse/rest/media/v1/storage_provider.py +++ b/synapse/rest/media/v1/storage_provider.py @@ -17,9 +17,10 @@ import logging import os import shutil -from twisted.internet import defer, threads +from twisted.internet import defer from synapse.config._base import Config +from synapse.util import logcontext from synapse.util.logcontext import run_in_background from .media_storage import FileResponder @@ -120,7 +121,8 @@ class FileStorageProviderBackend(StorageProvider): if not os.path.exists(dirname): os.makedirs(dirname) - return threads.deferToThread( + return logcontext.defer_to_thread( + self.hs.get_reactor(), shutil.copyfile, primary_fname, backup_fname, ) diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 89224b26cc..4c6e92beb8 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -25,7 +25,7 @@ See doc/log_contexts.rst for details on how this works. import logging import threading -from twisted.internet import defer +from twisted.internet import defer, threads logger = logging.getLogger(__name__) @@ -562,58 +562,76 @@ def _set_context_cb(result, context): return result -# modules to ignore in `logcontext_tracer` -_to_ignore = [ - "synapse.util.logcontext", - "synapse.http.server", - "synapse.storage._base", - "synapse.util.async_helpers", -] +def defer_to_thread(reactor, f, *args, **kwargs): + """ + Calls the function `f` using a thread from the reactor's default threadpool and + returns the result as a Deferred. + + Creates a new logcontext for `f`, which is created as a child of the current + logcontext (so its CPU usage metrics will get attributed to the current + logcontext). `f` should preserve the logcontext it is given. + + The result deferred follows the Synapse logcontext rules: you should `yield` + on it. + + Args: + reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread + the Deferred will be invoked, and whose threadpool we should use for the + function. + + Normally this will be hs.get_reactor(). + + f (callable): The function to call. + args: positional arguments to pass to f. -def logcontext_tracer(frame, event, arg): - """A tracer that logs whenever a logcontext "unexpectedly" changes within - a function. Probably inaccurate. + kwargs: keyword arguments to pass to f. - Use by calling `sys.settrace(logcontext_tracer)` in the main thread. + Returns: + Deferred: A Deferred which fires a callback with the result of `f`, or an + errback if `f` throws an exception. 
""" - if event == 'call': - name = frame.f_globals["__name__"] - if name.startswith("synapse"): - if name == "synapse.util.logcontext": - if frame.f_code.co_name in ["__enter__", "__exit__"]: - tracer = frame.f_back.f_trace - if tracer: - tracer.just_changed = True - - tracer = frame.f_trace - if tracer: - return tracer - - if not any(name.startswith(ig) for ig in _to_ignore): - return LineTracer() - - -class LineTracer(object): - __slots__ = ["context", "just_changed"] - - def __init__(self): - self.context = LoggingContext.current_context() - self.just_changed = False - - def __call__(self, frame, event, arg): - if event in 'line': - if self.just_changed: - self.context = LoggingContext.current_context() - self.just_changed = False - else: - c = LoggingContext.current_context() - if c != self.context: - logger.info( - "Context changed! %s -> %s, %s, %s", - self.context, c, - frame.f_code.co_filename, frame.f_lineno - ) - self.context = c + return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs) - return self + +def defer_to_threadpool(reactor, threadpool, f, *args, **kwargs): + """ + A wrapper for twisted.internet.threads.deferToThreadpool, which handles + logcontexts correctly. + + Calls the function `f` using a thread from the given threadpool and returns + the result as a Deferred. + + Creates a new logcontext for `f`, which is created as a child of the current + logcontext (so its CPU usage metrics will get attributed to the current + logcontext). `f` should preserve the logcontext it is given. + + The result deferred follows the Synapse logcontext rules: you should `yield` + on it. + + Args: + reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread + the Deferred will be invoked. Normally this will be hs.get_reactor(). + + threadpool (twisted.python.threadpool.ThreadPool): The threadpool to use for + running `f`. Normally this will be hs.get_reactor().getThreadPool(). + + f (callable): The function to call. + + args: positional arguments to pass to f. + + kwargs: keyword arguments to pass to f. + + Returns: + Deferred: A Deferred which fires a callback with the result of `f`, or an + errback if `f` throws an exception. 
+ """ + logcontext = LoggingContext.current_context() + + def g(): + with LoggingContext(parent_context=logcontext): + return f(*args, **kwargs) + + return make_deferred_yieldable( + threads.deferToThreadPool(reactor, threadpool, g) + ) -- cgit 1.5.1 From b3f6dddad2c3777e3620acfbb97e19409a5beb81 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 23 Oct 2018 14:29:17 +0100 Subject: Give some more things logcontexts (#4077) --- changelog.d/4077.misc | 1 + synapse/handlers/deactivate_account.py | 4 ++-- synapse/handlers/user_directory.py | 14 +++++++++----- 3 files changed, 12 insertions(+), 7 deletions(-) create mode 100644 changelog.d/4077.misc (limited to 'synapse') diff --git a/changelog.d/4077.misc b/changelog.d/4077.misc new file mode 100644 index 0000000000..52ca4c1de2 --- /dev/null +++ b/changelog.d/4077.misc @@ -0,0 +1 @@ +Give some more things logcontexts diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index b078df4a76..75fe50c42c 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -17,8 +17,8 @@ import logging from twisted.internet import defer from synapse.api.errors import SynapseError +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import UserID, create_requester -from synapse.util.logcontext import run_in_background from ._base import BaseHandler @@ -121,7 +121,7 @@ class DeactivateAccountHandler(BaseHandler): None """ if not self._user_parter_running: - run_in_background(self._user_parter_loop) + run_as_background_process("user_parter_loop", self._user_parter_loop) @defer.inlineCallbacks def _user_parter_loop(self): diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index d8413d6aa7..f11b430126 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -20,6 +20,7 @@ from six import iteritems from twisted.internet import defer from synapse.api.constants import EventTypes, JoinRules, Membership +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.roommember import ProfileInfo from synapse.types import get_localpart_from_id from synapse.util.metrics import Measure @@ -98,7 +99,6 @@ class UserDirectoryHandler(object): """ return self.store.search_user_dir(user_id, search_term, limit) - @defer.inlineCallbacks def notify_new_event(self): """Called when there may be more deltas to process """ @@ -108,11 +108,15 @@ class UserDirectoryHandler(object): if self._is_processing: return + @defer.inlineCallbacks + def process(): + try: + yield self._unsafe_process() + finally: + self._is_processing = False + self._is_processing = True - try: - yield self._unsafe_process() - finally: - self._is_processing = False + run_as_background_process("user_directory.notify_new_event", process) @defer.inlineCallbacks def handle_local_profile_change(self, user_id, profile): -- cgit 1.5.1 From 6105c6101fa0f4560daf7d23dedcfd217530b02a Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Tue, 23 Oct 2018 15:24:58 +0100 Subject: fix race condiftion in calling initialise_reserved_users --- changelog.d/4081.bugfix | 2 ++ synapse/app/homeserver.py | 8 ------ synapse/storage/monthly_active_users.py | 46 +++++++++++++++++++++--------- synapse/storage/registration.py | 16 ++++++++--- tests/storage/test_monthly_active_users.py | 10 +++++-- 5 files changed, 55 insertions(+), 27 deletions(-) create mode 
100644 changelog.d/4081.bugfix

(limited to 'synapse')

diff --git a/changelog.d/4081.bugfix b/changelog.d/4081.bugfix
new file mode 100644
index 0000000000..f275acb61c
--- /dev/null
+++ b/changelog.d/4081.bugfix
@@ -0,0 +1,2 @@
+Fix race condition in populating reserved users
+
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 0b85b377e3..593e1e75db 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -553,14 +553,6 @@ def run(hs):
             generate_monthly_active_users,
         )
 
-    # XXX is this really supposed to be a background process? it looks
-    # like it needs to complete before some of the other stuff runs.
-    run_as_background_process(
-        "initialise_reserved_users",
-        hs.get_datastore().initialise_reserved_users,
-        hs.config.mau_limits_reserved_threepids,
-    )
-
     start_generate_monthly_active_users()
     if hs.config.limit_usage_by_mau:
         clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000)
diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py
index 0fe8c8e24c..26e577814a 100644
--- a/synapse/storage/monthly_active_users.py
+++ b/synapse/storage/monthly_active_users.py
@@ -33,19 +33,28 @@ class MonthlyActiveUsersStore(SQLBaseStore):
         self._clock = hs.get_clock()
         self.hs = hs
         self.reserved_users = ()
+        self.initialise_reserved_users(
+            dbconn.cursor(), hs.config.mau_limits_reserved_threepids
+        )
 
-    @defer.inlineCallbacks
-    def initialise_reserved_users(self, threepids):
-        store = self.hs.get_datastore()
+    def initialise_reserved_users(self, txn, threepids):
+        """
+        Ensures that reserved threepids are accounted for in the MAU table, should
+        be called on start up.
+
+        Arguments:
+            threepids []: List of threepid dicts to reserve
+        """
         reserved_user_list = []
 
         # Do not add more reserved users than the total allowable number
         for tp in threepids[:self.hs.config.max_mau_value]:
-            user_id = yield store.get_user_id_by_threepid(
+            user_id = self.get_user_id_by_threepid_txn(
+                txn,
                 tp["medium"], tp["address"]
             )
             if user_id:
-                yield self.upsert_monthly_active_user(user_id)
+                self.upsert_monthly_active_user_txn(txn, user_id)
                 reserved_user_list.append(user_id)
             else:
                 logger.warning(
@@ -55,8 +64,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):
 
     @defer.inlineCallbacks
     def reap_monthly_active_users(self):
-        """
-        Cleans out monthly active user table to ensure that no stale
+        """Cleans out monthly active user table to ensure that no stale
         entries exist.
 
         Returns:
@@ -165,19 +173,33 @@ class MonthlyActiveUsersStore(SQLBaseStore):
 
     @defer.inlineCallbacks
     def upsert_monthly_active_user(self, user_id):
+        """Updates or inserts monthly active user member
+        Arguments:
+            user_id (str): user to add/update
+        """
+        is_insert = yield self.runInteraction(
+            "upsert_monthly_active_user", self.upsert_monthly_active_user_txn,
+            user_id
+        )
+        if is_insert:
+            self.user_last_seen_monthly_active.invalidate((user_id,))
+            self.get_monthly_active_count.invalidate(())
+
+    def upsert_monthly_active_user_txn(self, txn, user_id):
         """
         Updates or inserts monthly active user member
         Arguments:
+            txn (cursor):
             user_id (str): user to add/update
 
-            Deferred[bool]: True if a new entry was created, False if an
+            bool: True if a new entry was created, False if an
             existing one was updated.
         """
         # Am consciously deciding to lock the table on the basis that it ought
         # never be a big table and alternative approaches (batching multiple
         # upserts into a single txn) introduced a lot of extra complexity.
        # See https://github.com/matrix-org/synapse/issues/3854 for more
-        is_insert = yield self._simple_upsert(
-            desc="upsert_monthly_active_user",
+        is_insert = self._simple_upsert_txn(
+            txn,
             table="monthly_active_users",
             keyvalues={
                 "user_id": user_id,
@@ -186,9 +208,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):
                 "timestamp": int(self._clock.time_msec()),
             },
         )
-        if is_insert:
-            self.user_last_seen_monthly_active.invalidate((user_id,))
-            self.get_monthly_active_count.invalidate(())
+
+        return is_insert
 
     @cached(num_args=1)
     def user_last_seen_monthly_active(self, user_id):
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index 26b429e307..01931f29cc 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -474,17 +474,25 @@ class RegistrationStore(RegistrationWorkerStore,
 
     @defer.inlineCallbacks
     def get_user_id_by_threepid(self, medium, address):
-        ret = yield self._simple_select_one(
+        user_id = yield self.runInteraction(
+            "get_user_id_by_threepid", self.get_user_id_by_threepid_txn,
+            medium, address
+        )
+        defer.returnValue(user_id)
+
+    def get_user_id_by_threepid_txn(self, txn, medium, address):
+        ret = self._simple_select_one_txn(
+            txn,
             "user_threepids",
             {
                 "medium": medium,
                 "address": address
             },
-            ['user_id'], True, 'get_user_id_by_threepid'
+            ['user_id'], True
         )
         if ret:
-            defer.returnValue(ret['user_id'])
-        defer.returnValue(None)
+            return ret['user_id']
+        return None
 
     def user_delete_threepid(self, user_id, medium, address):
         return self._simple_delete(
diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py
index 686f12a0dc..0c17745ae7 100644
--- a/tests/storage/test_monthly_active_users.py
+++ b/tests/storage/test_monthly_active_users.py
@@ -52,7 +52,10 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase):
         now = int(self.hs.get_clock().time_msec())
         self.store.user_add_threepid(user1, "email", user1_email, now, now)
         self.store.user_add_threepid(user2, "email", user2_email, now, now)
-        self.store.initialise_reserved_users(threepids)
+
+        self.store.runInteraction(
+            "initialise", self.store.initialise_reserved_users, threepids
+        )
         self.pump()
 
         active_count = self.store.get_monthly_active_count()
@@ -199,7 +202,10 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase):
             {'medium': 'email', 'address': user2_email},
         ]
         self.hs.config.mau_limits_reserved_threepids = threepids
-        self.store.initialise_reserved_users(threepids)
+        self.store.runInteraction(
+            "initialise", self.store.initialise_reserved_users, threepids
+        )
+
         self.pump()
         count = self.store.get_registered_reserved_users_count()
         self.assertEquals(self.get_success(count), 0)
-- 
cgit 1.5.1
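Illustrative sketch, not part of any patch here: the storage idiom the commit above relies on. A Deferred-returning public method delegates to a synchronous `_txn` variant, so the same query can also run against an existing transaction (or, as above, a raw cursor during start-up, before the reactor is running). `ExampleStore`, the `things` table and its columns are stand-ins.

    from twisted.internet import defer

    from synapse.storage._base import SQLBaseStore

    class ExampleStore(SQLBaseStore):
        @defer.inlineCallbacks
        def lookup_thing(self, key):
            # Public entry point: opens its own transaction via runInteraction.
            result = yield self.runInteraction(
                "lookup_thing", self.lookup_thing_txn, key,
            )
            defer.returnValue(result)

        def lookup_thing_txn(self, txn, key):
            # Synchronous variant: callers already holding a transaction can
            # call this directly, without touching the reactor.
            ret = self._simple_select_one_txn(
                txn, "things", {"key": key}, ["value"], allow_none=True,
            )
            return ret["value"] if ret else None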
From 329d18b39cbc1bc9eba8ae9de1fcf734d0cf1a78 Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Tue, 23 Oct 2018 15:27:20 +0100
Subject: remove white space

---
 changelog.d/4081.bugfix                 | 1 -
 synapse/storage/monthly_active_users.py | 3 +--
 2 files changed, 1 insertion(+), 3 deletions(-)

(limited to 'synapse')

diff --git a/changelog.d/4081.bugfix b/changelog.d/4081.bugfix
index f275acb61c..13dad58842 100644
--- a/changelog.d/4081.bugfix
+++ b/changelog.d/4081.bugfix
@@ -1,2 +1 @@
 Fix race condition in populating reserved users
-
diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py
index 26e577814a..cf15f8c5ba 100644
--- a/synapse/storage/monthly_active_users.py
+++ b/synapse/storage/monthly_active_users.py
@@ -38,8 +38,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):
         )
 
     def initialise_reserved_users(self, txn, threepids):
-        """
-        Ensures that reserved threepids are accounted for in the MAU table, should
+        """Ensures that reserved threepids are accounted for in the MAU table, should
         be called on start up.
 
         Arguments:
-- 
cgit 1.5.1


From 3e704822be40a7d1d3fa82bf554f6a85823b5686 Mon Sep 17 00:00:00 2001
From: Travis Ralston
Date: Tue, 23 Oct 2018 10:25:31 -0600
Subject: Comments help

---
 synapse/handlers/groups_local.py | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'synapse')

diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py
index cb7e7d49cb..173315af6c 100644
--- a/synapse/handlers/groups_local.py
+++ b/synapse/handlers/groups_local.py
@@ -41,6 +41,11 @@ def _create_rerouter(func_name):
                 destination, group_id, *args, **kwargs
             )
 
+            # Capture errors returned by the remote homeserver and
+            # re-throw specific errors as SynapseErrors. This is so
+            # when the remote end responds with things like 403 Not
+            # In Group, we can communicate that to the client instead
+            # of a 500.
             def h(failure):
                 failure.trap(HttpResponseException)
                 e = failure.value
-- 
cgit 1.5.1


From a67d8ace9bc8b5f5ab953fdcfd6ade077337782d Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Tue, 23 Oct 2018 17:44:39 +0100
Subject: remove errant exception and style

---
 changelog.d/3975.feature       |  2 +-
 synapse/config/registration.py |  2 +-
 synapse/handlers/register.py   | 10 +++++-----
 3 files changed, 7 insertions(+), 7 deletions(-)

(limited to 'synapse')

diff --git a/changelog.d/3975.feature b/changelog.d/3975.feature
index 79c2711fb4..1e33f1b3b8 100644
--- a/changelog.d/3975.feature
+++ b/changelog.d/3975.feature
@@ -1 +1 @@
-Servers with auto-join rooms, should automatically create those rooms when first user registers
+Servers with auto-join rooms, will now automatically create those rooms when the first user registers
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index 4b9bf6f2d1..5df321b287 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -18,7 +18,7 @@ from distutils.util import strtobool
 from synapse.types import RoomAlias
 from synapse.util.stringutils import random_string_with_symbols
 
-from ._base import Config, ConfigError
+from synapse.config._base import Config, ConfigError
 
 
 class RegistrationConfig(Config):
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 1b5873c8d7..9615dd552f 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -231,15 +231,15 @@ class RegistrationHandler(BaseHandler):
         for r in self.hs.config.auto_join_rooms:
             try:
                 if should_auto_create_rooms:
-                    if self.hs.hostname != RoomAlias.from_string(r).domain:
-                        logger.warn(
+                    room_alias = RoomAlias.from_string(r)
+                    if self.hs.hostname != room_alias.domain:
+                        logger.warning(
                             'Cannot create room alias %s, '
-                            'it does not match server domain' % (r,)
+                            'it does not match server domain', (r,)
                         )
-                        raise SynapseError()
                     else:
                         # create room expects the localpart of the room alias
-                        room_alias_localpart = RoomAlias.from_string(r).localpart
+                        room_alias_localpart = room_alias.localpart
                         yield self.room_creation_handler.create_room(
                             fake_requester,
                             config={
-- 
cgit 1.5.1


From e564306e315fc3dfd37e5fed495ae300fbb58c8a Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Wed, 24 Oct 2018 09:23:33 +0100
Subject: sanity-check the is_processing flag ...
and rename it, for even more sanity --- synapse/push/emailpusher.py | 11 +++++++---- synapse/push/httppusher.py | 11 +++++++---- 2 files changed, 14 insertions(+), 8 deletions(-) (limited to 'synapse') diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index 0bc548ae7a..f369124258 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -70,7 +70,7 @@ class EmailPusher(object): # See httppusher self.max_stream_ordering = None - self.processing = False + self._is_processing = False def on_started(self): if self.mailer is not None: @@ -99,15 +99,18 @@ class EmailPusher(object): self._start_processing() def _start_processing(self): - if self.processing: + if self._is_processing: return run_as_background_process("emailpush.process", self._process) @defer.inlineCallbacks def _process(self): + # we should never get here if we are already processing + assert not self._is_processing + try: - self.processing = True + self._is_processing = True if self.throttle_params is None: # this is our first loop: load up the throttle params @@ -126,7 +129,7 @@ class EmailPusher(object): if self.max_stream_ordering == starting_max_ordering: break finally: - self.processing = False + self._is_processing = False @defer.inlineCallbacks def _unsafe_process(self): diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 33034d44da..6bd703632d 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -60,7 +60,7 @@ class HttpPusher(object): self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC self.failing_since = pusherdict['failing_since'] self.timed_call = None - self.processing = False + self._is_processing = False # This is the highest stream ordering we know it's safe to process. # When new events arrive, we'll be given a window of new events: we @@ -122,15 +122,18 @@ class HttpPusher(object): self.timed_call = None def _start_processing(self): - if self.processing: + if self._is_processing: return run_as_background_process("httppush.process", self._process) @defer.inlineCallbacks def _process(self): + # we should never get here if we are already processing + assert not self._is_processing + try: - self.processing = True + self._is_processing = True # if the max ordering changes while we're running _unsafe_process, # call it again, and so on until we've caught up. while True: @@ -142,7 +145,7 @@ class HttpPusher(object): if self.max_stream_ordering == starting_max_ordering: break finally: - self.processing = False + self._is_processing = False @defer.inlineCallbacks def _unsafe_process(self): -- cgit 1.5.1 From c573794b22a47a0f03c80f2732922c3375e7b74c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 24 Oct 2018 09:24:55 +0100 Subject: Fix start_pushers vs _start_pushers confusion --- synapse/push/pusherpool.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index d25877cc66..5a4e73ccd6 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -41,7 +41,7 @@ class PusherPool: def __init__(self, _hs): self.hs = _hs self.pusher_factory = PusherFactory(_hs) - self.start_pushers = _hs.config.start_pushers + self._should_start_pushers = _hs.config.start_pushers self.store = self.hs.get_datastore() self.clock = self.hs.get_clock() self.pushers = {} @@ -49,7 +49,7 @@ class PusherPool: def start(self): """Starts the pushers off in a background process. 
""" - if not self.start_pushers: + if not self._should_start_pushers: logger.info("Not starting pushers because they are disabled in the config") return run_as_background_process("start_pushers", self._start_pushers) @@ -175,7 +175,7 @@ class PusherPool: @defer.inlineCallbacks def start_pusher_by_id(self, app_id, pushkey, user_id): """Look up the details for the given pusher, and start it""" - if not self._start_pushers: + if not self._should_start_pushers: return resultlist = yield self.store.get_pushers_by_app_id_and_pushkey( -- cgit 1.5.1 From 810715f79aafc586e70e9dcd3da4a2cc6823f704 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 24 Oct 2018 09:44:22 +0100 Subject: Rename resolve_events_with_factory --- synapse/handlers/federation.py | 4 ++-- synapse/state/__init__.py | 14 +++++++------- synapse/state/v1.py | 2 +- synapse/state/v2.py | 2 +- tests/state/test_v2.py | 4 ++-- 5 files changed, 13 insertions(+), 13 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index b028d58ae4..91a18a552c 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -53,7 +53,7 @@ from synapse.replication.http.federation import ( ReplicationFederationSendEventsRestServlet, ) from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet -from synapse.state import StateResolutionStore, resolve_events_with_factory +from synapse.state import StateResolutionStore, resolve_events_with_store from synapse.types import UserID, get_domain_from_id from synapse.util import logcontext, unwrapFirstError from synapse.util.async_helpers import Linearizer @@ -385,7 +385,7 @@ class FederationHandler(BaseHandler): event_map[x.event_id] = x room_version = yield self.store.get_room_version(room_id) - state_map = yield resolve_events_with_factory( + state_map = yield resolve_events_with_store( room_version, state_maps, event_map, state_res_store=StateResolutionStore(self.store), ) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 836e137296..9b40b18d5b 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -394,7 +394,7 @@ class StateHandler(object): } with Measure(self.clock, "state._resolve_events"): - new_state = yield resolve_events_with_factory( + new_state = yield resolve_events_with_store( room_version, state_set_ids, event_map=state_map, state_res_store=StateResolutionStore(self.store), @@ -478,10 +478,10 @@ class StateResolutionHandler(object): # start by assuming we won't have any conflicted state, and build up the new # state map by iterating through the state groups. If we discover a conflict, - # we give up and instead use `resolve_events_with_factory`. + # we give up and instead use `resolve_events_with_store`. # # XXX: is this actually worthwhile, or should we just let - # resolve_events_with_factory do it? + # resolve_events_with_store do it? 
new_state = {} conflicted_state = False for st in itervalues(state_groups_ids): @@ -496,7 +496,7 @@ class StateResolutionHandler(object): if conflicted_state: logger.info("Resolving conflicted state for %r", room_id) with Measure(self.clock, "state._resolve_events"): - new_state = yield resolve_events_with_factory( + new_state = yield resolve_events_with_store( room_version, list(itervalues(state_groups_ids)), event_map=event_map, @@ -581,7 +581,7 @@ def _make_state_cache_entry( ) -def resolve_events_with_factory(room_version, state_sets, event_map, state_res_store): +def resolve_events_with_store(room_version, state_sets, event_map, state_res_store): """ Args: room_version(str): Version of the room @@ -604,11 +604,11 @@ def resolve_events_with_factory(room_version, state_sets, event_map, state_res_s a map from (type, state_key) to event_id. """ if room_version == RoomVersions.V1: - return v1.resolve_events_with_factory( + return v1.resolve_events_with_store( state_sets, event_map, state_res_store.get_events, ) elif room_version == RoomVersions.VDH_TEST: - return v2.resolve_events_with_factory( + return v2.resolve_events_with_store( state_sets, event_map, state_res_store, ) else: diff --git a/synapse/state/v1.py b/synapse/state/v1.py index 7a7157b352..70a981f4a2 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -31,7 +31,7 @@ POWER_KEY = (EventTypes.PowerLevels, "") @defer.inlineCallbacks -def resolve_events_with_factory(state_sets, event_map, state_map_factory): +def resolve_events_with_store(state_sets, event_map, state_map_factory): """ Args: state_sets(list): List of dicts of (type, state_key) -> event_id, diff --git a/synapse/state/v2.py b/synapse/state/v2.py index d034b4f38e..5d06f7e928 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -29,7 +29,7 @@ logger = logging.getLogger(__name__) @defer.inlineCallbacks -def resolve_events_with_factory(state_sets, event_map, state_res_store): +def resolve_events_with_store(state_sets, event_map, state_res_store): """Resolves the state using the v2 state resolution algorithm Args: diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py index b96bb6b25c..73f2ef7557 100644 --- a/tests/state/test_v2.py +++ b/tests/state/test_v2.py @@ -24,7 +24,7 @@ from synapse.event_auth import auth_types_for_event from synapse.events import FrozenEvent from synapse.state.v2 import ( lexicographical_topological_sort, - resolve_events_with_factory, + resolve_events_with_store, ) from synapse.types import EventID @@ -541,7 +541,7 @@ class StateTestCase(unittest.TestCase): elif len(prev_events) == 1: state_before = dict(state_at_event[prev_events[0]]) else: - state_d = resolve_events_with_factory( + state_d = resolve_events_with_store( [state_at_event[n] for n in prev_events], event_map=event_map, state_res_store=TestStateResolutionStore(event_map), -- cgit 1.5.1 From dacbeb2e03274d347bd3d3919a00a56661dbb002 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 24 Oct 2018 09:47:49 +0100 Subject: Comment --- synapse/handlers/federation.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 91a18a552c..1daa08df7e 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -390,6 +390,11 @@ class FederationHandler(BaseHandler): state_res_store=StateResolutionStore(self.store), ) + # We need to give _process_received_pdu the actual state events + # rather than event ids, so generate that now. 
+ + # First though we need to fetch all the events that are in + # state_map, so we can build up the state below. evs = yield self.store.get_events( list(state_map.values()), get_prev_content=False, @@ -397,8 +402,6 @@ class FederationHandler(BaseHandler): ) event_map.update(evs) - # we need to give _process_received_pdu the actual state events - # rather than event ids, so generate that now. state = [ event_map[e] for e in six.itervalues(state_map) ] -- cgit 1.5.1 From 47a9ba435d9a4b9e311d9d9a3c02be105942f357 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 19 Oct 2018 10:26:50 +0100 Subject: Use match rather than search --- synapse/config/room_directory.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py index 41ef3217e8..2ca010afdb 100644 --- a/synapse/config/room_directory.py +++ b/synapse/config/room_directory.py @@ -92,10 +92,11 @@ class _AliasRule(object): boolean """ - if not self._user_id_regex.search(user_id): + # Note: The regexes are anchored at both ends + if not self._user_id_regex.match(user_id): return False - if not self._alias_regex.search(alias): + if not self._alias_regex.match(alias): return False return True -- cgit 1.5.1 From ef771cc4c2d988e5188ba7e75df9adebb0ebafe1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 24 Oct 2018 10:35:01 +0100 Subject: Fix a number of flake8 errors Broadly three things here: * disable W504 which seems a bit whacko * remove a bunch of `as e` expressions from exception handlers that don't use them * use `r""` for strings which include backslashes Also, we don't use pep8 any more, so we can get rid of the duplicate config there. --- changelog.d/4082.misc | 1 + scripts-dev/tail-synapse.py | 2 +- scripts/synapse_port_db | 3 ++- setup.cfg | 17 ++++++++--------- synapse/config/repository.py | 2 +- synapse/crypto/keyclient.py | 2 +- synapse/federation/federation_server.py | 2 +- synapse/rest/client/v2_alpha/auth.py | 2 +- synapse/rest/media/v1/preview_url_resource.py | 2 +- synapse/storage/registration.py | 2 +- tests/config/test_generate.py | 2 +- tests/events/test_utils.py | 4 ++-- 12 files changed, 21 insertions(+), 20 deletions(-) create mode 100644 changelog.d/4082.misc (limited to 'synapse') diff --git a/changelog.d/4082.misc b/changelog.d/4082.misc new file mode 100644 index 0000000000..a81faf5e9b --- /dev/null +++ b/changelog.d/4082.misc @@ -0,0 +1 @@ +Clean up some bits of code which were flagged by the linter diff --git a/scripts-dev/tail-synapse.py b/scripts-dev/tail-synapse.py index 1a36b94038..7c9985d9f0 100644 --- a/scripts-dev/tail-synapse.py +++ b/scripts-dev/tail-synapse.py @@ -48,7 +48,7 @@ def main(): row.name: row.position for row in replicate(server, {"streams": "-1"})["streams"].rows } - except requests.exceptions.ConnectionError as e: + except requests.exceptions.ConnectionError: time.sleep(0.1) while True: diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 2f6e69e552..3c7b606323 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -501,7 +501,8 @@ class Porter(object): try: yield self.postgres_store.runInteraction("alter_table", alter_table) - except Exception as e: + except Exception: + # On Error Resume Next pass yield self.postgres_store.runInteraction( diff --git a/setup.cfg b/setup.cfg index 52feaa9cc7..b6b4aa740d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -14,17 +14,16 @@ ignore = pylint.cfg tox.ini -[pep8] -max-line-length = 90 -# W503 requires that 
binary operators be at the end, not start, of lines. Erik -# doesn't like it. E203 is contrary to PEP8. E731 is silly. -ignore = W503,E203,E731 - [flake8] -# note that flake8 inherits the "ignore" settings from "pep8" (because it uses -# pep8 to do those checks), but not the "max-line-length" setting max-line-length = 90 -ignore=W503,E203,E731 + +# see https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes +# for error codes. The ones we ignore are: +# W503: line break before binary operator +# W504: line break after binary operator +# E203: whitespace before ':' (which is contrary to pep8?) +# E731: do not assign a lambda expression, use a def +ignore=W503,W504,E203,E731 [isort] line_length = 89 diff --git a/synapse/config/repository.py b/synapse/config/repository.py index fc909c1fac..06c62ab62c 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -178,7 +178,7 @@ class ContentRepositoryConfig(Config): def default_config(self, **kwargs): media_store = self.default_path("media_store") uploads_path = self.default_path("uploads") - return """ + return r""" # Directory where uploaded images and attachments are stored. media_store_path: "%(media_store)s" diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py index 57d4665e84..080c81f14b 100644 --- a/synapse/crypto/keyclient.py +++ b/synapse/crypto/keyclient.py @@ -55,7 +55,7 @@ def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1): raise IOError("Cannot get key for %r" % server_name) except (ConnectError, DomainError) as e: logger.warn("Error getting key for %r: %s", server_name, e) - except Exception as e: + except Exception: logger.exception("Error getting key for %r", server_name) raise IOError("Cannot get key for %r" % server_name) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 4efe95faa4..af0107a46e 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -800,7 +800,7 @@ class FederationHandlerRegistry(object): yield handler(origin, content) except SynapseError as e: logger.info("Failed to handle edu %r: %r", edu_type, e) - except Exception as e: + except Exception: logger.exception("Failed to handle edu %r", edu_type) def on_query(self, query_type, args): diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index bd8b5f4afa..693b303881 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -99,7 +99,7 @@ class AuthRestServlet(RestServlet): cannot be handled in the normal flow (with requests to the same endpoint). Current use is for web fallback auth. """ - PATTERNS = client_v2_patterns("/auth/(?P[\w\.]*)/fallback/web") + PATTERNS = client_v2_patterns(r"/auth/(?P[\w\.]*)/fallback/web") def __init__(self, hs): super(AuthRestServlet, self).__init__() diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 8c892ff187..1a7bfd6b56 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -674,7 +674,7 @@ def summarize_paragraphs(text_nodes, min_size=200, max_size=500): # This splits the paragraph into words, but keeping the # (preceeding) whitespace intact so we can easily concat # words back together. 
- for match in re.finditer("\s*\S+", description): + for match in re.finditer(r"\s*\S+", description): word = match.group() # Keep adding words while the total length is less than diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 26b429e307..2dd14aba1c 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -567,7 +567,7 @@ class RegistrationStore(RegistrationWorkerStore, def _find_next_generated_user_id(txn): txn.execute("SELECT name FROM users") - regex = re.compile("^@(\d+):") + regex = re.compile(r"^@(\d+):") found = set() diff --git a/tests/config/test_generate.py b/tests/config/test_generate.py index f88d28a19d..0c23068bcf 100644 --- a/tests/config/test_generate.py +++ b/tests/config/test_generate.py @@ -67,6 +67,6 @@ class ConfigGenerationTestCase(unittest.TestCase): with open(log_config_file) as f: config = f.read() # find the 'filename' line - matches = re.findall("^\s*filename:\s*(.*)$", config, re.M) + matches = re.findall(r"^\s*filename:\s*(.*)$", config, re.M) self.assertEqual(1, len(matches)) self.assertEqual(matches[0], expected) diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index ff217ca8b9..d0cc492deb 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -156,7 +156,7 @@ class SerializeEventTestCase(unittest.TestCase): room_id="!foo:bar", content={"key.with.dots": {}}, ), - ["content.key\.with\.dots"], + [r"content.key\.with\.dots"], ), {"content": {"key.with.dots": {}}}, ) @@ -172,7 +172,7 @@ class SerializeEventTestCase(unittest.TestCase): "nested.dot.key": {"leaf.key": 42, "not_me_either": 1}, }, ), - ["content.nested\.dot\.key.leaf\.key"], + [r"content.nested\.dot\.key.leaf\.key"], ), {"content": {"nested.dot.key": {"leaf.key": 42}}}, ) -- cgit 1.5.1 From 7e07d25ed64e6af403dd508a2b5226e8d18fc1ac Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 24 Oct 2018 10:41:45 +0100 Subject: Allow backslashes in event field filters Fixes a bug introduced in https://github.com/matrix-org/synapse/pull/1783 which meant that single backslashes were not allowed in event field filters. The intention here is to allow single-backslashes, but disallow double-backslashes. --- changelog.d/4083.bugfix | 1 + synapse/api/filtering.py | 5 ++++- tests/api/test_filtering.py | 12 +++++++++++- 3 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 changelog.d/4083.bugfix (limited to 'synapse') diff --git a/changelog.d/4083.bugfix b/changelog.d/4083.bugfix new file mode 100644 index 0000000000..b3b08cdfa6 --- /dev/null +++ b/changelog.d/4083.bugfix @@ -0,0 +1 @@ +Fix bug which prevented backslashes being used in event field filters \ No newline at end of file diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index eed8c67e6a..677c0bdd4c 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -172,7 +172,10 @@ USER_FILTER_SCHEMA = { # events a lot easier as we can then use a negative lookbehind # assertion to split '\.' If we allowed \\ then it would # incorrectly split '\\.' See synapse.events.utils.serialize_event - "pattern": "^((?!\\\).)*$" + # + # Note that because this is a regular expression, we have to escape + # each backslash in the pattern. 
+            "pattern": r"^((?!\\\\).)*$"
         }
       }
     },
diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py
index 48b2d3d663..2a7044801a 100644
--- a/tests/api/test_filtering.py
+++ b/tests/api/test_filtering.py
@@ -60,7 +60,7 @@ class FilteringTestCase(unittest.TestCase):
         invalid_filters = [
             {"boom": {}},
             {"account_data": "Hello World"},
-            {"event_fields": ["\\foo"]},
+            {"event_fields": [r"\\foo"]},
             {"room": {"timeline": {"limit": 0}, "state": {"not_bars": ["*"]}}},
             {"event_format": "other"},
             {"room": {"not_rooms": ["#foo:pik-test"]}},
@@ -109,6 +109,16 @@ class FilteringTestCase(unittest.TestCase):
                 "event_format": "client",
                 "event_fields": ["type", "content", "sender"],
             },
+
+            # a single backslash should be permitted (though it is debatable whether
+            # it should be permitted before anything other than `.`, and what that
+            # actually means)
+            #
+            # (note that event_fields is implemented in
+            # synapse.events.utils.serialize_event, and so whether this actually works
+            # is tested elsewhere. We just want to check that it is allowed through the
+            # filter validation)
+            {"event_fields": [r"foo\.bar"]},
         ]
         for filter in valid_filters:
            try:
-- 
cgit 1.5.1


From 94a49e0636313507fab2e43b99f969385f535ea2 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 24 Oct 2018 14:39:23 +0100
Subject: fix tuple

Co-Authored-By: neilisfragile
---
 synapse/handlers/register.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'synapse')

diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 9615dd552f..217928add1 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -235,7 +235,8 @@ class RegistrationHandler(BaseHandler):
                     if self.hs.hostname != room_alias.domain:
                         logger.warning(
                             'Cannot create room alias %s, '
-                            'it does not match server domain', (r,)
+                            'it does not match server domain',
+                            r,
                         )
                     else:
                         # create room expects the localpart of the room alias
-- 
cgit 1.5.1


From 9532caf6ef3cdc6dba80b11a920410b50d3490dd Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Wed, 24 Oct 2018 16:08:25 +0100
Subject: remove trailing white space

---
 synapse/handlers/register.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'synapse')

diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 217928add1..e9d7b25a36 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -235,7 +235,7 @@ class RegistrationHandler(BaseHandler):
                     if self.hs.hostname != room_alias.domain:
                         logger.warning(
                             'Cannot create room alias %s, '
-                            'it does not match server domain', 
+                            'it does not match server domain',
                             r,
                         )
                     else:
-- 
cgit 1.5.1


From 9ec218658650558a704e939e56f87d1df1a84423 Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Wed, 24 Oct 2018 16:09:21 +0100
Subject: isort

---
 synapse/config/registration.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'synapse')

diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index 5df321b287..7480ed5145 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -15,11 +15,10 @@
 
 from distutils.util import strtobool
 
+from synapse.config._base import Config, ConfigError
 from synapse.types import RoomAlias
 from synapse.util.stringutils import random_string_with_symbols
 
-from synapse.config._base import Config, ConfigError
-
 
 class RegistrationConfig(Config):
 
-- 
cgit 1.5.1


From 663d9db8e7892b79a927de70ec9add64312827d5 Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date:
Wed, 24 Oct 2018 17:17:30 +0100 Subject: commit transaction before closing --- synapse/server.py | 1 + 1 file changed, 1 insertion(+) (limited to 'synapse') diff --git a/synapse/server.py b/synapse/server.py index 3e9d3d8256..cf6b872cbd 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -207,6 +207,7 @@ class HomeServer(object): logger.info("Setting up.") with self.get_db_conn() as conn: self.datastore = self.DATASTORE_CLASS(conn, self) + conn.commit() logger.info("Finished setting up.") def get_reactor(self): -- cgit 1.5.1 From ea69a84bbb2fc9c1da6db0384a98f577f3bc95a7 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 24 Oct 2018 17:18:08 +0100 Subject: fix style inconsistencies --- changelog.d/4081.bugfix | 3 ++- synapse/storage/monthly_active_users.py | 43 +++++++++++++++++++----------- synapse/storage/registration.py | 19 +++++++++++++ tests/storage/test_monthly_active_users.py | 4 +-- 4 files changed, 51 insertions(+), 18 deletions(-) (limited to 'synapse') diff --git a/changelog.d/4081.bugfix b/changelog.d/4081.bugfix index 13dad58842..cfe4b3e9d9 100644 --- a/changelog.d/4081.bugfix +++ b/changelog.d/4081.bugfix @@ -1 +1,2 @@ -Fix race condition in populating reserved users +Fix race condition where config defined reserved users were not being added to +the monthly active user list prior to the homeserver reactor firing up diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index cf15f8c5ba..9a5c3b7eda 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -33,21 +33,23 @@ class MonthlyActiveUsersStore(SQLBaseStore): self._clock = hs.get_clock() self.hs = hs self.reserved_users = () - self.initialise_reserved_users( - dbconn.cursor(), hs.config.mau_limits_reserved_threepids + # Do not add more reserved users than the total allowable number + self._initialise_reserved_users( + dbconn.cursor(), + hs.config.mau_limits_reserved_threepids[:self.hs.config.max_mau_value], ) - def initialise_reserved_users(self, txn, threepids): + def _initialise_reserved_users(self, txn, threepids): """Ensures that reserved threepids are accounted for in the MAU table, should be called on start up. - Arguments: - threepids []: List of threepid dicts to reserve + Args: + txn (cursor): + threepids (list[dict]): List of threepid dicts to reserve """ reserved_user_list = [] - # Do not add more reserved users than the total allowable number - for tp in threepids[:self.hs.config.max_mau_value]: + for tp in threepids: user_id = self.get_user_id_by_threepid_txn( txn, tp["medium"], tp["address"] @@ -172,26 +174,36 @@ class MonthlyActiveUsersStore(SQLBaseStore): @defer.inlineCallbacks def upsert_monthly_active_user(self, user_id): - """Updates or inserts monthly active user member - Arguments: + """Updates or inserts the user into the monthly active user table, which + is used to track the current MAU usage of the server + + Args: user_id (str): user to add/update """ is_insert = yield self.runInteraction( "upsert_monthly_active_user", self.upsert_monthly_active_user_txn, user_id ) + # Considered pushing cache invalidation down into txn method, but + # did not because txn is not a LoggingTransaction. This means I could not + # call txn.call_after(). 
Therefore cache is altered in background thread + # and calls from elsewhere to user_last_seen_monthly_active and + # get_monthly_active_count fail with ValueError in + # synapse/util/caches/descriptors.py#check_thread if is_insert: self.user_last_seen_monthly_active.invalidate((user_id,)) self.get_monthly_active_count.invalidate(()) def upsert_monthly_active_user_txn(self, txn, user_id): - """ - Updates or inserts monthly active user member - Arguments: - txn (cursor): - user_id (str): user to add/update + """Updates or inserts monthly active user member + + Args: + txn (cursor): + user_id (str): user to add/update + + Returns: bool: True if a new entry was created, False if an - existing one was updated. + existing one was updated. """ # Am consciously deciding to lock the table on the basis that is ought # never be a big table and alternative approaches (batching multiple @@ -207,6 +219,7 @@ class MonthlyActiveUsersStore(SQLBaseStore): "timestamp": int(self._clock.time_msec()), }, ) + return is_insert @cached(num_args=1) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 0f970850e8..80d76bf9d7 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -474,6 +474,15 @@ class RegistrationStore(RegistrationWorkerStore, @defer.inlineCallbacks def get_user_id_by_threepid(self, medium, address): + """Returns user id from threepid + + Args: + medium (str): threepid medium e.g. email + address (str): threepid address e.g. me@example.com + + Returns: + Deferred[str|None]: user id or None if no user id/threepid mapping exists + """ user_id = yield self.runInteraction( "get_user_id_by_threepid", self.get_user_id_by_threepid_txn, medium, address @@ -481,6 +490,16 @@ class RegistrationStore(RegistrationWorkerStore, defer.returnValue(user_id) def get_user_id_by_threepid_txn(self, txn, medium, address): + """Returns user id from threepid + + Args: + txn (cursor): + medium (str): threepid medium e.g. email + address (str): threepid address e.g. 
me@example.com + + Returns: + str|None: user id or None if no user id/threepid mapping exists + """ ret = self._simple_select_one_txn( txn, "user_threepids", diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index 0c17745ae7..832e379a83 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -54,7 +54,7 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): self.store.user_add_threepid(user2, "email", user2_email, now, now) self.store.runInteraction( - "initialise", self.store.initialise_reserved_users, threepids + "initialise", self.store._initialise_reserved_users, threepids ) self.pump() @@ -203,7 +203,7 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): ] self.hs.config.mau_limits_reserved_threepids = threepids self.store.runInteraction( - "initialise", self.store.initialise_reserved_users, threepids + "initialise", self.store._initialise_reserved_users, threepids ) self.pump() -- cgit 1.5.1 From f8fe98812be74e37432f11508c07488079bf949d Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 25 Oct 2018 14:58:59 +0100 Subject: improve comments --- synapse/storage/monthly_active_users.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index 9a5c3b7eda..01963c879f 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -184,18 +184,18 @@ class MonthlyActiveUsersStore(SQLBaseStore): "upsert_monthly_active_user", self.upsert_monthly_active_user_txn, user_id ) - # Considered pushing cache invalidation down into txn method, but - # did not because txn is not a LoggingTransaction. This means I could not - # call txn.call_after(). Therefore cache is altered in background thread - # and calls from elsewhere to user_last_seen_monthly_active and - # get_monthly_active_count fail with ValueError in - # synapse/util/caches/descriptors.py#check_thread + if is_insert: self.user_last_seen_monthly_active.invalidate((user_id,)) self.get_monthly_active_count.invalidate(()) def upsert_monthly_active_user_txn(self, txn, user_id): """Updates or inserts monthly active user member + Note that, after calling this method, it will generally be necessary + to invalidate the caches on user_last_seen_monthly_active and + get_monthly_active_count. We can't do that here, because we are running + in a database thread rather than the main thread, and we can't call + txn.call_after because txn may not be a LoggingTransaction. Args: txn (cursor): -- cgit 1.5.1 From e5481b22aac800b016e05f50a50ced85a226b364 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 25 Oct 2018 15:25:21 +0100 Subject: Use allow/deny --- synapse/config/room_directory.py | 12 ++++++------ tests/config/test_room_directory.py | 8 ++++---- tests/handlers/test_directory.py | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) (limited to 'synapse') diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py index 2ca010afdb..9da13ab11b 100644 --- a/synapse/config/room_directory.py +++ b/synapse/config/room_directory.py @@ -35,13 +35,13 @@ class RoomDirectoryConfig(Config): # The format of this option is a list of rules that contain globs that # match against user_id and the new alias (fully qualified with server # name). The action in the first rule that matches is taken, which can - # currently either be "allowed" or "denied". 
+    # currently either be "allow" or "deny".
+    #
+    # If no rules match the request is denied.
     alias_creation_rules:
         - user_id: "*"
           alias: "*"
-          action: allowed
+          action: allow
     """

     def is_alias_creation_allowed(self, user_id, alias):
@@ -56,7 +56,7 @@ class RoomDirectoryConfig(Config):
         """
         for rule in self._alias_creation_rules:
             if rule.matches(user_id, alias):
-                return rule.action == "allowed"
+                return rule.action == "allow"

         return False

@@ -67,12 +67,12 @@ class _AliasRule(object):
         user_id = rule["user_id"]
         alias = rule["alias"]

-        if action in ("allowed", "denied"):
+        if action in ("allow", "deny"):
             self.action = action
         else:
             raise ConfigError(
-                "alias_creation_rules rules can only have action of 'allowed'"
-                " or 'denied'"
+                "alias_creation_rules rules can only have action of 'allow'"
+                " or 'deny'"
             )

         try:
diff --git a/tests/config/test_room_directory.py b/tests/config/test_room_directory.py
index 75021a5f04..f37a17d618 100644
--- a/tests/config/test_room_directory.py
+++ b/tests/config/test_room_directory.py
@@ -26,16 +26,16 @@ class RoomDirectoryConfigTestCase(unittest.TestCase):
             alias_creation_rules:
                 - user_id: "*bob*"
                   alias: "*"
-                  action: "denied"
+                  action: "deny"
                 - user_id: "*"
                   alias: "#unofficial_*"
-                  action: "allowed"
+                  action: "allow"
                 - user_id: "@foo*:example.com"
                   alias: "*"
-                  action: "allowed"
+                  action: "allow"
                 - user_id: "@gah:example.com"
                   alias: "#goo:example.com"
-                  action: "allowed"
+                  action: "allow"
         """)

         rd_config = RoomDirectoryConfig()
diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py
index 4f299b74ba..8ae6556c0a 100644
--- a/tests/handlers/test_directory.py
+++ b/tests/handlers/test_directory.py
@@ -118,7 +118,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase):
             {
                 "user_id": "*",
                 "alias": "#unofficial_*",
-                "action": "allowed",
+                "action": "allow",
             }
         ]
-- 
cgit 1.5.1


From fcbd488e9a60a70a81cd7a94a6adebe6e06c525a Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Thu, 25 Oct 2018 16:13:43 +0100
Subject: add new line

---
 synapse/storage/monthly_active_users.py | 1 +
 1 file changed, 1 insertion(+)

(limited to 'synapse')

diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py
index 01963c879f..cf4104dc2e 100644
--- a/synapse/storage/monthly_active_users.py
+++ b/synapse/storage/monthly_active_users.py
@@ -191,6 +191,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):

     def upsert_monthly_active_user_txn(self, txn, user_id):
         """Updates or inserts monthly active user member
+
         Note that, after calling this method, it will generally be necessary
         to invalidate the caches on user_last_seen_monthly_active and
         get_monthly_active_count. We can't do that here, because we are running
-- 
cgit 1.5.1


From cb53ce9d6429252d5ee012f5a476cc834251c27d Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 25 Oct 2018 17:49:55 +0100
Subject: Refactor state group lookup to reduce DB hits (#4011)

Currently when fetching state groups from the data store we make two hits
to the database: once for members and once for non-members (unless the
request is filtered to one or the other). This adds needless load to the
database, so this PR refactors the lookup to make only a single database
hit.
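Illustrative sketch, not part of the patch below: how the new StateFilter API reads at a call site. `StateFilter.all()`, `StateFilter.from_types()` and the `state_filter` keyword argument are what this patch introduces; the store, event ID and user ID here are placeholders.

    from twisted.internet import defer

    from synapse.api.constants import EventTypes
    from synapse.storage.state import StateFilter

    @defer.inlineCallbacks
    def get_member_state_id(store, event_id):
        # Old style: types=[(EventTypes.Member, user_id)] plus filtered_types.
        # New style: a single StateFilter describes the whole query.
        members_only = StateFilter.from_types(
            [(EventTypes.Member, "@alice:example.com")],
        )

        state_ids = yield store.get_state_ids_for_event(
            event_id, state_filter=members_only,
        )

        # StateFilter.all() is the "no filtering" case (the old types=None).
        defer.returnValue(state_ids)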
--- changelog.d/4011.misc | 1 + synapse/handlers/initial_sync.py | 4 +- synapse/handlers/message.py | 20 +- synapse/handlers/pagination.py | 15 +- synapse/handlers/room.py | 22 +- synapse/handlers/sync.py | 97 ++--- synapse/rest/client/v1/room.py | 3 +- synapse/storage/events.py | 2 +- synapse/storage/state.py | 845 ++++++++++++++++++++++++--------------- synapse/visibility.py | 15 +- tests/storage/test_state.py | 175 +++++--- 11 files changed, 714 insertions(+), 485 deletions(-) create mode 100644 changelog.d/4011.misc (limited to 'synapse') diff --git a/changelog.d/4011.misc b/changelog.d/4011.misc new file mode 100644 index 0000000000..ad7768c4cd --- /dev/null +++ b/changelog.d/4011.misc @@ -0,0 +1 @@ +Reduce database load when fetching state groups diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index e009395207..563bb3cea3 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -156,7 +156,7 @@ class InitialSyncHandler(BaseHandler): room_end_token = "s%d" % (event.stream_ordering,) deferred_room_state = run_in_background( self.store.get_state_for_events, - [event.event_id], None, + [event.event_id], ) deferred_room_state.addCallback( lambda states: states[event.event_id] @@ -301,7 +301,7 @@ class InitialSyncHandler(BaseHandler): def _room_initial_sync_parted(self, user_id, room_id, pagin_config, membership, member_event_id, is_peeking): room_state = yield self.store.get_state_for_events( - [member_event_id], None + [member_event_id], ) room_state = room_state[member_event_id] diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 6c4fcfb10a..969e588e73 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -35,6 +35,7 @@ from synapse.crypto.event_signing import add_hashes_and_signatures from synapse.events.utils import serialize_event from synapse.events.validator import EventValidator from synapse.replication.http.send_event import ReplicationSendEventRestServlet +from synapse.storage.state import StateFilter from synapse.types import RoomAlias, UserID from synapse.util.async_helpers import Linearizer from synapse.util.frozenutils import frozendict_json_encoder @@ -80,7 +81,7 @@ class MessageHandler(object): elif membership == Membership.LEAVE: key = (event_type, state_key) room_state = yield self.store.get_state_for_events( - [membership_event_id], [key] + [membership_event_id], StateFilter.from_types([key]) ) data = room_state[membership_event_id].get(key) @@ -88,7 +89,7 @@ class MessageHandler(object): @defer.inlineCallbacks def get_state_events( - self, user_id, room_id, types=None, filtered_types=None, + self, user_id, room_id, state_filter=StateFilter.all(), at_token=None, is_guest=False, ): """Retrieve all state events for a given room. If the user is @@ -100,13 +101,8 @@ class MessageHandler(object): Args: user_id(str): The user requesting state events. room_id(str): The room ID to get all state events from. - types(list[(str, str|None)]|None): List of (type, state_key) tuples - which are used to filter the state fetched. If `state_key` is None, - all events are returned of the given type. - May be None, which matches any key. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. 
            at_token(StreamToken|None): the stream token at which we are
                requesting the state. If the user is not allowed to view the
                state as of that stream token, we raise a 403 SynapseError. If
                None, returns the current
@@ -139,7 +135,7 @@ class MessageHandler(object):
            event = last_events[0]
            if visible_events:
                room_state = yield self.store.get_state_for_events(
-                    [event.event_id], types, filtered_types=filtered_types,
+                    [event.event_id], state_filter=state_filter,
                )
                room_state = room_state[event.event_id]
            else:
@@ -158,12 +154,12 @@ class MessageHandler(object):

        if membership == Membership.JOIN:
            state_ids = yield self.store.get_filtered_current_state_ids(
-                room_id, types, filtered_types=filtered_types,
+                room_id, state_filter=state_filter,
            )
            room_state = yield self.store.get_events(state_ids.values())
        elif membership == Membership.LEAVE:
            room_state = yield self.store.get_state_for_events(
-                [membership_event_id], types, filtered_types=filtered_types,
+                [membership_event_id], state_filter=state_filter,
            )
            room_state = room_state[membership_event_id]

diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index a155b6e938..43f81bd607 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -21,6 +21,7 @@ from twisted.python.failure import Failure
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import SynapseError
 from synapse.events.utils import serialize_event
+from synapse.storage.state import StateFilter
 from synapse.types import RoomStreamToken
 from synapse.util.async_helpers import ReadWriteLock
 from synapse.util.logcontext import run_in_background
@@ -255,16 +256,14 @@ class PaginationHandler(object):

        if event_filter and event_filter.lazy_load_members():
            # TODO: remove redundant members
-            types = [
-                (EventTypes.Member, state_key)
-                for state_key in set(
-                    event.sender  # FIXME: we also care about invite targets etc.
-                    for event in events
-                )
-            ]
+            # FIXME: we also care about invite targets etc.
+            state_filter = StateFilter.from_types(
+                (EventTypes.Member, event.sender)
+                for event in events
+            )

            state_ids = yield self.store.get_state_ids_for_event(
-                events[0].event_id, types=types,
+                events[0].event_id, state_filter=state_filter,
            )

            if state_ids:
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index ab1571b27b..3ba92bdb4c 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -33,6 +33,7 @@ from synapse.api.constants import (
     RoomCreationPreset,
 )
 from synapse.api.errors import AuthError, Codes, StoreError, SynapseError
+from synapse.storage.state import StateFilter
 from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID
 from synapse.util import stringutils
 from synapse.visibility import filter_events_for_client
@@ -489,23 +490,24 @@ class RoomContextHandler(object):
        else:
            last_event_id = event_id

-        types = None
-        filtered_types = None
        if event_filter and event_filter.lazy_load_members():
-            members = set(ev.sender for ev in itertools.chain(
-                results["events_before"],
-                (results["event"],),
-                results["events_after"],
-            ))
-            filtered_types = [EventTypes.Member]
-            types = [(EventTypes.Member, member) for member in members]
+            state_filter = StateFilter.from_lazy_load_member_list(
+                ev.sender
+                for ev in itertools.chain(
+                    results["events_before"],
+                    (results["event"],),
+                    results["events_after"],
+                )
+            )
+        else:
+            state_filter = StateFilter.all()

        # XXX: why do we return the state as of the last event rather than the
        # first? 
Shouldn't we be consistent with /sync? # https://github.com/matrix-org/matrix-doc/issues/687 state = yield self.store.get_state_for_events( - [last_event_id], types, filtered_types=filtered_types, + [last_event_id], state_filter=state_filter, ) results["state"] = list(state[last_event_id].values()) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 351892a94f..09739f2862 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -27,6 +27,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.push.clientformat import format_push_rules_for_user from synapse.storage.roommember import MemberSummary +from synapse.storage.state import StateFilter from synapse.types import RoomStreamToken from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache @@ -469,25 +470,20 @@ class SyncHandler(object): )) @defer.inlineCallbacks - def get_state_after_event(self, event, types=None, filtered_types=None): + def get_state_after_event(self, event, state_filter=StateFilter.all()): """ Get the room state after the given event Args: event(synapse.events.EventBase): event of interest - types(list[(str, str|None)]|None): List of (type, state_key) tuples - which are used to filter the state fetched. If `state_key` is None, - all events are returned of the given type. - May be None, which matches any key. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. Returns: A Deferred map from ((type, state_key)->Event) """ state_ids = yield self.store.get_state_ids_for_event( - event.event_id, types, filtered_types=filtered_types, + event.event_id, state_filter=state_filter, ) if event.is_state(): state_ids = state_ids.copy() @@ -495,18 +491,14 @@ class SyncHandler(object): defer.returnValue(state_ids) @defer.inlineCallbacks - def get_state_at(self, room_id, stream_position, types=None, filtered_types=None): + def get_state_at(self, room_id, stream_position, state_filter=StateFilter.all()): """ Get the room state at a particular stream position Args: room_id(str): room for which to get state stream_position(StreamToken): point at which to get state - types(list[(str, str|None)]|None): List of (type, state_key) tuples - which are used to filter the state fetched. If `state_key` is None, - all events are returned of the given type. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. Returns: A Deferred map from ((type, state_key)->Event) @@ -522,7 +514,7 @@ class SyncHandler(object): if last_events: last_event = last_events[-1] state = yield self.get_state_after_event( - last_event, types, filtered_types=filtered_types, + last_event, state_filter=state_filter, ) else: @@ -563,10 +555,11 @@ class SyncHandler(object): last_event = last_events[-1] state_ids = yield self.store.get_state_ids_for_event( - last_event.event_id, [ + last_event.event_id, + state_filter=StateFilter.from_types([ (EventTypes.Name, ''), (EventTypes.CanonicalAlias, ''), - ] + ]), ) # this is heavily cached, thus: fast. 
@@ -717,8 +710,7 @@ class SyncHandler(object):

        with Measure(self.clock, "compute_state_delta"):

-            types = None
-            filtered_types = None
+            members_to_fetch = None

            lazy_load_members = sync_config.filter_collection.lazy_load_members()
            include_redundant_members = (
@@ -729,16 +721,21 @@ class SyncHandler(object):
                # We only request state for the members needed to display the
                # timeline:

-                types = [
-                    (EventTypes.Member, state_key)
-                    for state_key in set(
-                        event.sender  # FIXME: we also care about invite targets etc.
-                        for event in batch.events
-                    )
-                ]
+                members_to_fetch = set(
+                    event.sender  # FIXME: we also care about invite targets etc.
+                    for event in batch.events
+                )
+
+                if full_state:
+                    # always make sure we LL ourselves so we know we're in the room
+                    # (if we are) to fix https://github.com/vector-im/riot-web/issues/7209
+                    # We only need to apply this on full state syncs given we disabled
+                    # LL for incr syncs in #3840.
+                    members_to_fetch.add(sync_config.user.to_string())

-                # only apply the filtering to room members
-                filtered_types = [EventTypes.Member]
+                state_filter = StateFilter.from_lazy_load_member_list(members_to_fetch)
+            else:
+                state_filter = StateFilter.all()

            timeline_state = {
                (event.type, event.state_key): event.event_id
@@ -746,28 +743,19 @@ class SyncHandler(object):
            }

            if full_state:
-                if lazy_load_members:
-                    # always make sure we LL ourselves so we know we're in the room
-                    # (if we are) to fix https://github.com/vector-im/riot-web/issues/7209
-                    # We only need apply this on full state syncs given we disabled
-                    # LL for incr syncs in #3840.
-                    types.append((EventTypes.Member, sync_config.user.to_string()))
-
                if batch:
                    current_state_ids = yield self.store.get_state_ids_for_event(
-                        batch.events[-1].event_id, types=types,
-                        filtered_types=filtered_types,
+                        batch.events[-1].event_id, state_filter=state_filter,
                    )

                    state_ids = yield self.store.get_state_ids_for_event(
-                        batch.events[0].event_id, types=types,
-                        filtered_types=filtered_types,
+                        batch.events[0].event_id, state_filter=state_filter,
                    )

                else:
                    current_state_ids = yield self.get_state_at(
-                        room_id, stream_position=now_token, types=types,
-                        filtered_types=filtered_types,
+                        room_id, stream_position=now_token,
+                        state_filter=state_filter,
                    )

                    state_ids = current_state_ids
@@ -781,8 +769,7 @@ class SyncHandler(object):
                )
            elif batch.limited:
                state_at_timeline_start = yield self.store.get_state_ids_for_event(
-                    batch.events[0].event_id, types=types,
-                    filtered_types=filtered_types,
+                    batch.events[0].event_id, state_filter=state_filter,
                )

                # for now, we disable LL for gappy syncs - see
@@ -797,17 +784,15 @@ class SyncHandler(object):
                # members to just be ones which were timeline senders, which then ensures
                # all of the rest get included in the state block (if we need to know
                # about them).
- types = None - filtered_types = None + state_filter = StateFilter.all() state_at_previous_sync = yield self.get_state_at( - room_id, stream_position=since_token, types=types, - filtered_types=filtered_types, + room_id, stream_position=since_token, + state_filter=state_filter, ) current_state_ids = yield self.store.get_state_ids_for_event( - batch.events[-1].event_id, types=types, - filtered_types=filtered_types, + batch.events[-1].event_id, state_filter=state_filter, ) state_ids = _calculate_state( @@ -821,7 +806,7 @@ class SyncHandler(object): else: state_ids = {} if lazy_load_members: - if types and batch.events: + if members_to_fetch and batch.events: # We're returning an incremental sync, with no # "gap" since the previous sync, so normally there would be # no state to return. @@ -831,8 +816,12 @@ class SyncHandler(object): # timeline here, and then dedupe any redundant ones below. state_ids = yield self.store.get_state_ids_for_event( - batch.events[0].event_id, types=types, - filtered_types=None, # we only want members! + batch.events[0].event_id, + # we only want members! + state_filter=StateFilter.from_types( + (EventTypes.Member, member) + for member in members_to_fetch + ), ) if lazy_load_members and not include_redundant_members: diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 663934efd0..fcfe7857f6 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -33,6 +33,7 @@ from synapse.http.servlet import ( parse_json_object_from_request, parse_string, ) +from synapse.storage.state import StateFilter from synapse.streams.config import PaginationConfig from synapse.types import RoomAlias, RoomID, StreamToken, ThirdPartyInstanceID, UserID @@ -409,7 +410,7 @@ class RoomMemberListRestServlet(ClientV1RestServlet): room_id=room_id, user_id=requester.user.to_string(), at_token=at_token, - types=[(EventTypes.Member, None)], + state_filter=StateFilter.from_types([(EventTypes.Member, None)]), ) chunk = [] diff --git a/synapse/storage/events.py b/synapse/storage/events.py index c780f55277..8881b009df 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2089,7 +2089,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore for sg in remaining_state_groups: logger.info("[purge] de-delta-ing remaining state group %s", sg) curr_state = self._get_state_groups_from_groups_txn( - txn, [sg], types=None + txn, [sg], ) curr_state = curr_state[sg] diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 3f4cbd61c4..ef65929bb2 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -19,6 +19,8 @@ from collections import namedtuple from six import iteritems, itervalues from six.moves import range +import attr + from twisted.internet import defer from synapse.api.constants import EventTypes @@ -48,6 +50,318 @@ class _GetStateGroupDelta(namedtuple("_GetStateGroupDelta", ("prev_group", "delt return len(self.delta_ids) if self.delta_ids else 0 +@attr.s(slots=True) +class StateFilter(object): + """A filter used when querying for state. + + Attributes: + types (dict[str, set[str]|None]): Map from type to set of state keys (or + None). This specifies which state_keys for the given type to fetch + from the DB. If None then all events with that type are fetched. If + the set is empty then no events with that type are fetched. + include_others (bool): Whether to fetch events with types that do not + appear in `types`. 
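+
+        For example (an illustrative filter; "@alice:example.com" is a made-up
+        user ID), fetching all non-member state plus a single user's
+        membership looks like:
+
+            StateFilter(
+                types={EventTypes.Member: {"@alice:example.com"}},
+                include_others=True,
+            )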
+ """ + + types = attr.ib() + include_others = attr.ib(default=False) + + def __attrs_post_init__(self): + # If `include_others` is set we canonicalise the filter by removing + # wildcards from the types dictionary + if self.include_others: + self.types = { + k: v for k, v in iteritems(self.types) + if v is not None + } + + @staticmethod + def all(): + """Creates a filter that fetches everything. + + Returns: + StateFilter + """ + return StateFilter(types={}, include_others=True) + + @staticmethod + def none(): + """Creates a filter that fetches nothing. + + Returns: + StateFilter + """ + return StateFilter(types={}, include_others=False) + + @staticmethod + def from_types(types): + """Creates a filter that only fetches the given types + + Args: + types (Iterable[tuple[str, str|None]]): A list of type and state + keys to fetch. A state_key of None fetches everything for + that type + + Returns: + StateFilter + """ + type_dict = {} + for typ, s in types: + if typ in type_dict: + if type_dict[typ] is None: + continue + + if s is None: + type_dict[typ] = None + continue + + type_dict.setdefault(typ, set()).add(s) + + return StateFilter(types=type_dict) + + @staticmethod + def from_lazy_load_member_list(members): + """Creates a filter that returns all non-member events, plus the member + events for the given users + + Args: + members (iterable[str]): Set of user IDs + + Returns: + StateFilter + """ + return StateFilter( + types={EventTypes.Member: set(members)}, + include_others=True, + ) + + def return_expanded(self): + """Creates a new StateFilter where type wild cards have been removed + (except for memberships). The returned filter is a superset of the + current one, i.e. anything that passes the current filter will pass + the returned filter. + + This helps the caching as the DictionaryCache knows if it has *all* the + state, but does not know if it has all of the keys of a particular type, + which makes wildcard lookups expensive unless we have a complete cache. + Hence, if we are doing a wildcard lookup, populate the cache fully so + that we can do an efficient lookup next time. + + Note that since we have two caches, one for membership events and one for + other events, we can be a bit more clever than simply returning + `StateFilter.all()` if `has_wildcards()` is True. + + We return a StateFilter where: + 1. the list of membership events to return is the same + 2. if there is a wildcard that matches non-member events we + return all non-member events + + Returns: + StateFilter + """ + + if self.is_full(): + # If we're going to return everything then there's nothing to do + return self + + if not self.has_wildcards(): + # If there are no wild cards, there's nothing to do + return self + + if EventTypes.Member in self.types: + get_all_members = self.types[EventTypes.Member] is None + else: + get_all_members = self.include_others + + has_non_member_wildcard = self.include_others or any( + state_keys is None + for t, state_keys in iteritems(self.types) + if t != EventTypes.Member + ) + + if not has_non_member_wildcard: + # If there are no non-member wild cards we can just return ourselves + return self + + if get_all_members: + # We want to return everything. + return StateFilter.all() + else: + # We want to return all non-members, but only particular + # memberships + return StateFilter( + types={EventTypes.Member: self.types[EventTypes.Member]}, + include_others=True, + ) + + def make_sql_filter_clause(self): + """Converts the filter to an SQL clause. 
+
+        For example:
+
+            f = StateFilter.from_types([("m.room.create", "")])
+            clause, args = f.make_sql_filter_clause()
+            clause == "(type = ? AND state_key = ?)"
+            args == ['m.room.create', '']
+
+
+        Returns:
+            tuple[str, list]: The SQL string (may be empty) and arguments. An
+            empty SQL string is returned when the filter matches everything
+            (i.e. is "full").
+        """
+
+        where_clause = ""
+        where_args = []
+
+        if self.is_full():
+            return where_clause, where_args
+
+        if not self.include_others and not self.types:
+            # i.e. this is an empty filter, so we need to return a clause that
+            # will match nothing
+            return "1 = 2", []
+
+        # First we build up a list of clauses for each type/state_key combo
+        clauses = []
+        for etype, state_keys in iteritems(self.types):
+            if state_keys is None:
+                clauses.append("(type = ?)")
+                where_args.append(etype)
+                continue
+
+            for state_key in state_keys:
+                clauses.append("(type = ? AND state_key = ?)")
+                where_args.extend((etype, state_key))
+
+        # This will match anything that appears in `self.types`
+        where_clause = " OR ".join(clauses)
+
+        # If we want to include stuff that's not in the types dict then we add
+        # an `OR type NOT IN (...)` clause to the end.
+        if self.include_others:
+            if where_clause:
+                where_clause += " OR "
+
+            where_clause += "type NOT IN (%s)" % (
+                ",".join(["?"] * len(self.types)),
+            )
+            where_args.extend(self.types)
+
+        return where_clause, where_args
+
+    def max_entries_returned(self):
+        """Returns the maximum number of entries this filter will return if
+        known, otherwise returns None.
+
+        For example a simple state filter asking for `("m.room.create", "")`
+        will return 1, whereas the default state filter will return None.
+
+        This is used to bail out early if the right number of entries have been
+        fetched.
+        """
+        if self.has_wildcards():
+            return None
+
+        return len(self.concrete_types())
+
+    def filter_state(self, state_dict):
+        """Returns the state filtered by this StateFilter
+
+        Args:
+            state_dict (dict[tuple[str, str], Any]): The state map to filter
+
+        Returns:
+            dict[tuple[str, str], Any]: The filtered state map
+        """
+        if self.is_full():
+            return dict(state_dict)
+
+        filtered_state = {}
+        for k, v in iteritems(state_dict):
+            typ, state_key = k
+            if typ in self.types:
+                state_keys = self.types[typ]
+                if state_keys is None or state_key in state_keys:
+                    filtered_state[k] = v
+            elif self.include_others:
+                filtered_state[k] = v
+
+        return filtered_state
+
+    def is_full(self):
+        """Whether this filter fetches everything or not
+
+        Returns:
+            bool
+        """
+        return self.include_others and not self.types
+
+    def has_wildcards(self):
+        """Whether the filter includes wildcards or is attempting to fetch
+        specific state.
+
+        Returns:
+            bool
+        """
+
+        return (
+            self.include_others
+            or any(
+                state_keys is None
+                for state_keys in itervalues(self.types)
+            )
+        )
+
+    def concrete_types(self):
+        """Returns a list of concrete type/state_keys (i.e. not None) that
+        will be fetched. This will be a complete list if `has_wildcards`
+        returns False, but otherwise will be a subset (or even empty).
+
+        Returns:
+            list[tuple[str,str]]
+        """
+        return [
+            (t, s)
+            for t, state_keys in iteritems(self.types)
+            if state_keys is not None
+            for s in state_keys
+        ]
+
+    def get_member_split(self):
+        """Return the filter split into two: one which assumes it's exclusively
+        matching against member state, and one which assumes it's matching
+        against non-member state.
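+
+        For example (illustrative), splitting
+        StateFilter.from_lazy_load_member_list(["@alice:example.com"]) yields
+        a member filter matching only that user's membership event, and a
+        non-member filter matching all remaining state.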
+ + This is useful due to the returned filters giving correct results for + `is_full()`, `has_wildcards()`, etc, when operating against maps that + either exclusively contain member events or only contain non-member + events. (Which is the case when dealing with the member vs non-member + state caches). + + Returns: + tuple[StateFilter, StateFilter]: The member and non member filters + """ + + if EventTypes.Member in self.types: + state_keys = self.types[EventTypes.Member] + if state_keys is None: + member_filter = StateFilter.all() + else: + member_filter = StateFilter({EventTypes.Member: state_keys}) + elif self.include_others: + member_filter = StateFilter.all() + else: + member_filter = StateFilter.none() + + non_member_filter = StateFilter( + types={k: v for k, v in iteritems(self.types) if k != EventTypes.Member}, + include_others=self.include_others, + ) + + return member_filter, non_member_filter + + # this inherits from EventsWorkerStore because it calls self.get_events class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): """The parts of StateGroupStore that can be called from workers. @@ -152,61 +466,41 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): ) # FIXME: how should this be cached? - def get_filtered_current_state_ids(self, room_id, types, filtered_types=None): + def get_filtered_current_state_ids(self, room_id, state_filter=StateFilter.all()): """Get the current state event of a given type for a room based on the current_state_events table. This may not be as up-to-date as the result of doing a fresh state resolution as per state_handler.get_current_state + Args: room_id (str) - types (list[(Str, (Str|None))]): List of (type, state_key) tuples - which are used to filter the state fetched. `state_key` may be - None, which matches any `state_key` - filtered_types (list[Str]|None): List of types to apply the above filter to. + state_filter (StateFilter): The state filter used to fetch state + from the database. + Returns: - deferred: dict of (type, state_key) -> event + Deferred[dict[tuple[str, str], str]]: Map from type/state_key to + event ID. """ - include_other_types = False if filtered_types is None else True - def _get_filtered_current_state_ids_txn(txn): results = {} - sql = """SELECT type, state_key, event_id FROM current_state_events - WHERE room_id = ? %s""" - # Turns out that postgres doesn't like doing a list of OR's and - # is about 1000x slower, so we just issue a query for each specific - # type seperately. - if types: - clause_to_args = [ - ( - "AND type = ? AND state_key = ?", - (etype, state_key) - ) if state_key is not None else ( - "AND type = ?", - (etype,) - ) - for etype, state_key in types - ] - - if include_other_types: - unique_types = set(filtered_types) - clause_to_args.append( - ( - "AND type <> ? " * len(unique_types), - list(unique_types) - ) - ) - else: - # If types is None we fetch all the state, and so just use an - # empty where clause with no extra args. - clause_to_args = [("", [])] - for where_clause, where_args in clause_to_args: - args = [room_id] - args.extend(where_args) - txn.execute(sql % (where_clause,), args) - for row in txn: - typ, state_key, event_id = row - key = (intern_string(typ), intern_string(state_key)) - results[key] = event_id + sql = """ + SELECT type, state_key, event_id FROM current_state_events + WHERE room_id = ? 
+            """
+
+            where_clause, where_args = state_filter.make_sql_filter_clause()
+
+            if where_clause:
+                sql += " AND (%s)" % (where_clause,)
+
+            args = [room_id]
+            args.extend(where_args)
+            txn.execute(sql, args)
+            for row in txn:
+                typ, state_key, event_id = row
+                key = (intern_string(typ), intern_string(state_key))
+                results[key] = event_id
+
            return results

        return self.runInteraction(
@@ -322,20 +616,14 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
        })

    @defer.inlineCallbacks
-    def _get_state_groups_from_groups(self, groups, types, members=None):
+    def _get_state_groups_from_groups(self, groups, state_filter):
        """Returns the state groups for a given set of groups, filtering on
        types of state events.

        Args:
            groups(list[int]): list of state group IDs to query
-            types (Iterable[str, str|None]|None): list of 2-tuples of the form
-                (`type`, `state_key`), where a `state_key` of `None` matches all
-                state_keys for the `type`. If None, all types are returned.
-            members (bool|None): If not None, then, in addition to any filtering
-                implied by types, the results are also filtered to only include
-                member events (if True), or to exclude member events (if False)
-
-        Returns:
+            state_filter (StateFilter): The state filter used to fetch state
+                from the database.
+
+        Returns:
            Deferred[dict[int, dict[tuple[str, str], str]]]:
                dict of state_group_id -> (dict of (type, state_key) -> event id)

        """
        results = {}

        chunks = [groups[i:i + 100] for i in range(0, len(groups), 100)]
        for chunk in chunks:
            res = yield self.runInteraction(
                "_get_state_groups_from_groups",
-                self._get_state_groups_from_groups_txn, chunk, types, members,
+                self._get_state_groups_from_groups_txn, chunk, state_filter,
            )
            results.update(res)

        defer.returnValue(results)

    def _get_state_groups_from_groups_txn(
-        self, txn, groups, types=None, members=None,
+        self, txn, groups, state_filter=StateFilter.all(),
    ):
        results = {group: {} for group in groups}

-        if types is not None:
-            types = list(set(types))  # deduplicate types list
+        where_clause, where_args = state_filter.make_sql_filter_clause()
+
+        # Unless the filter clause is empty, we're going to append it after an
+        # existing where clause
+        if where_clause:
+            where_clause = " AND (%s)" % (where_clause,)

        if isinstance(self.database_engine, PostgresEngine):
            # Temporarily disable sequential scans in this transaction. This is
@@ -374,79 +666,33 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
            # group for the given type, state_key.
            # This may return multiple rows per (type, state_key), but last_value
            # should be the same.
-            sql = ("""
+            sql = """
                WITH RECURSIVE state(state_group) AS (
                    VALUES(?::bigint)
                    UNION ALL
                    SELECT prev_state_group FROM state_group_edges e, state s
                    WHERE s.state_group = e.state_group
                )
-                SELECT type, state_key, last_value(event_id) OVER (
+                SELECT DISTINCT type, state_key, last_value(event_id) OVER (
                    PARTITION BY type, state_key ORDER BY state_group ASC
                    ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
                ) AS event_id FROM state_groups_state
                WHERE state_group IN (
                    SELECT state_group FROM state
                )
-                %s
-            """)
+            """

-            if members is True:
-                sql += " AND type = '%s'" % (EventTypes.Member,)
-            elif members is False:
-                sql += " AND type <> '%s'" % (EventTypes.Member,)
-
-            # Turns out that postgres doesn't like doing a list of OR's and
-            # is about 1000x slower, so we just issue a query for each specific
-            # type seperately.
-            if types is not None:
-                clause_to_args = [
-                    (
-                        "AND type = ? 
AND state_key = ?", - (etype, state_key) - ) if state_key is not None else ( - "AND type = ?", - (etype,) - ) - for etype, state_key in types - ] - else: - # If types is None we fetch all the state, and so just use an - # empty where clause with no extra args. - clause_to_args = [("", [])] - - for where_clause, where_args in clause_to_args: - for group in groups: - args = [group] - args.extend(where_args) + for group in groups: + args = [group] + args.extend(where_args) - txn.execute(sql % (where_clause,), args) - for row in txn: - typ, state_key, event_id = row - key = (typ, state_key) - results[group][key] = event_id + txn.execute(sql + where_clause, args) + for row in txn: + typ, state_key, event_id = row + key = (typ, state_key) + results[group][key] = event_id else: - where_args = [] - where_clauses = [] - wildcard_types = False - if types is not None: - for typ in types: - if typ[1] is None: - where_clauses.append("(type = ?)") - where_args.append(typ[0]) - wildcard_types = True - else: - where_clauses.append("(type = ? AND state_key = ?)") - where_args.extend([typ[0], typ[1]]) - - where_clause = "AND (%s)" % (" OR ".join(where_clauses)) - else: - where_clause = "" - - if members is True: - where_clause += " AND type = '%s'" % EventTypes.Member - elif members is False: - where_clause += " AND type <> '%s'" % EventTypes.Member + max_entries_returned = state_filter.max_entries_returned() # We don't use WITH RECURSIVE on sqlite3 as there are distributions # that ship with an sqlite3 version that doesn't support it (e.g. wheezy) @@ -460,12 +706,11 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): # without the right indices (which we can't add until # after we finish deduping state, which requires this func) args = [next_group] - if types: - args.extend(where_args) + args.extend(where_args) txn.execute( "SELECT type, state_key, event_id FROM state_groups_state" - " WHERE state_group = ? %s" % (where_clause,), + " WHERE state_group = ? " + where_clause, args ) results[group].update( @@ -481,9 +726,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): # wildcards (i.e. Nones) in which case we have to do an exhaustive # search if ( - types is not None and - not wildcard_types and - len(results[group]) == len(types) + max_entries_returned is not None and + len(results[group]) == max_entries_returned ): break @@ -498,20 +742,14 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): return results @defer.inlineCallbacks - def get_state_for_events(self, event_ids, types, filtered_types=None): + def get_state_for_events(self, event_ids, state_filter=StateFilter.all()): """Given a list of event_ids and type tuples, return a list of state - dicts for each event. The state dicts will only have the type/state_keys - that are in the `types` list. + dicts for each event. Args: event_ids (list[string]) - types (list[(str, str|None)]|None): List of (type, state_key) tuples - which are used to filter the state fetched. If `state_key` is None, - all events are returned of the given type. - May be None, which matches any key. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. 
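+
+            (Illustrative example: passing
+            StateFilter.from_types([(EventTypes.Name, '')]) limits each
+            returned state dict to the room's m.room.name event.)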
Returns: deferred: A dict of (event_id) -> (type, state_key) -> [state_events] @@ -521,7 +759,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): ) groups = set(itervalues(event_to_groups)) - group_to_state = yield self._get_state_for_groups(groups, types, filtered_types) + group_to_state = yield self._get_state_for_groups(groups, state_filter) state_event_map = yield self.get_events( [ev_id for sd in itervalues(group_to_state) for ev_id in itervalues(sd)], @@ -540,20 +778,15 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): defer.returnValue({event: event_to_state[event] for event in event_ids}) @defer.inlineCallbacks - def get_state_ids_for_events(self, event_ids, types=None, filtered_types=None): + def get_state_ids_for_events(self, event_ids, state_filter=StateFilter.all()): """ Get the state dicts corresponding to a list of events, containing the event_ids of the state events (as opposed to the events themselves) Args: event_ids(list(str)): events whose state should be returned - types(list[(str, str|None)]|None): List of (type, state_key) tuples - which are used to filter the state fetched. If `state_key` is None, - all events are returned of the given type. - May be None, which matches any key. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. Returns: A deferred dict from event_id -> (type, state_key) -> event_id @@ -563,7 +796,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): ) groups = set(itervalues(event_to_groups)) - group_to_state = yield self._get_state_for_groups(groups, types, filtered_types) + group_to_state = yield self._get_state_for_groups(groups, state_filter) event_to_state = { event_id: group_to_state[group] @@ -573,45 +806,35 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): defer.returnValue({event: event_to_state[event] for event in event_ids}) @defer.inlineCallbacks - def get_state_for_event(self, event_id, types=None, filtered_types=None): + def get_state_for_event(self, event_id, state_filter=StateFilter.all()): """ Get the state dict corresponding to a particular event Args: event_id(str): event whose state should be returned - types(list[(str, str|None)]|None): List of (type, state_key) tuples - which are used to filter the state fetched. If `state_key` is None, - all events are returned of the given type. - May be None, which matches any key. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. 
Returns: A deferred dict from (type, state_key) -> state_event """ - state_map = yield self.get_state_for_events([event_id], types, filtered_types) + state_map = yield self.get_state_for_events([event_id], state_filter) defer.returnValue(state_map[event_id]) @defer.inlineCallbacks - def get_state_ids_for_event(self, event_id, types=None, filtered_types=None): + def get_state_ids_for_event(self, event_id, state_filter=StateFilter.all()): """ Get the state dict corresponding to a particular event Args: event_id(str): event whose state should be returned - types(list[(str, str|None)]|None): List of (type, state_key) tuples - which are used to filter the state fetched. If `state_key` is None, - all events are returned of the given type. - May be None, which matches any key. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. Returns: A deferred dict from (type, state_key) -> state_event """ - state_map = yield self.get_state_ids_for_events([event_id], types, filtered_types) + state_map = yield self.get_state_ids_for_events([event_id], state_filter) defer.returnValue(state_map[event_id]) @cached(max_entries=50000) @@ -642,18 +865,14 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): defer.returnValue({row["event_id"]: row["state_group"] for row in rows}) - def _get_some_state_from_cache(self, cache, group, types, filtered_types=None): + def _get_state_for_group_using_cache(self, cache, group, state_filter): """Checks if group is in cache. See `_get_state_for_groups` Args: cache(DictionaryCache): the state group cache to use group(int): The state group to lookup - types(list[str, str|None]): List of 2-tuples of the form - (`type`, `state_key`), where a `state_key` of `None` matches all - state_keys for the `type`. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. Returns 2-tuple (`state_dict`, `got_all`). `got_all` is a bool indicating if we successfully retrieved all @@ -662,124 +881,102 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): """ is_all, known_absent, state_dict_ids = cache.get(group) - type_to_key = {} + if is_all or state_filter.is_full(): + # Either we have everything or want everything, either way + # `is_all` tells us whether we've gotten everything. + return state_filter.filter_state(state_dict_ids), is_all # tracks whether any of our requested types are missing from the cache missing_types = False - for typ, state_key in types: - key = (typ, state_key) - - if ( - state_key is None or - (filtered_types is not None and typ not in filtered_types) - ): - type_to_key[typ] = None - # we mark the type as missing from the cache because - # when the cache was populated it might have been done with a - # restricted set of state_keys, so the wildcard will not work - # and the cache may be incomplete. 
- missing_types = True - else: - if type_to_key.get(typ, object()) is not None: - type_to_key.setdefault(typ, set()).add(state_key) - + if state_filter.has_wildcards(): + # We don't know if we fetched all the state keys for the types in + # the filter that are wildcards, so we have to assume that we may + # have missed some. + missing_types = True + else: + # There aren't any wild cards, so `concrete_types()` returns the + # complete list of event types we're wanting. + for key in state_filter.concrete_types(): if key not in state_dict_ids and key not in known_absent: missing_types = True + break - sentinel = object() - - def include(typ, state_key): - valid_state_keys = type_to_key.get(typ, sentinel) - if valid_state_keys is sentinel: - return filtered_types is not None and typ not in filtered_types - if valid_state_keys is None: - return True - if state_key in valid_state_keys: - return True - return False - - got_all = is_all - if not got_all: - # the cache is incomplete. We may still have got all the results we need, if - # we don't have any wildcards in the match list. - if not missing_types and filtered_types is None: - got_all = True - - return { - k: v for k, v in iteritems(state_dict_ids) - if include(k[0], k[1]) - }, got_all - - def _get_all_state_from_cache(self, cache, group): - """Checks if group is in cache. See `_get_state_for_groups` - - Returns 2-tuple (`state_dict`, `got_all`). `got_all` is a bool - indicating if we successfully retrieved all requests state from the - cache, if False we need to query the DB for the missing state. - - Args: - cache(DictionaryCache): the state group cache to use - group: The state group to lookup - """ - is_all, _, state_dict_ids = cache.get(group) - - return state_dict_ids, is_all + return state_filter.filter_state(state_dict_ids), not missing_types @defer.inlineCallbacks - def _get_state_for_groups(self, groups, types=None, filtered_types=None): + def _get_state_for_groups(self, groups, state_filter=StateFilter.all()): """Gets the state at each of a list of state groups, optionally filtering by type/state_key Args: groups (iterable[int]): list of state groups for which we want to get the state. - types (None|iterable[(str, None|str)]): - indicates the state type/keys required. If None, the whole - state is fetched and returned. - - Otherwise, each entry should be a `(type, state_key)` tuple to - include in the response. A `state_key` of None is a wildcard - meaning that we require all state with that type. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. - + state_filter (StateFilter): The state filter used to fetch state + from the database. 
Returns: Deferred[dict[int, dict[tuple[str, str], str]]]: dict of state_group_id -> (dict of (type, state_key) -> event id) """ - if types is not None: - non_member_types = [t for t in types if t[0] != EventTypes.Member] - if filtered_types is not None and EventTypes.Member not in filtered_types: - # we want all of the membership events - member_types = None - else: - member_types = [t for t in types if t[0] == EventTypes.Member] - - else: - non_member_types = None - member_types = None + member_filter, non_member_filter = state_filter.get_member_split() - non_member_state = yield self._get_state_for_groups_using_cache( - groups, self._state_group_cache, non_member_types, filtered_types, + # Now we look them up in the member and non-member caches + non_member_state, incomplete_groups_nm, = ( + yield self._get_state_for_groups_using_cache( + groups, self._state_group_cache, + state_filter=non_member_filter, + ) ) - # XXX: we could skip this entirely if member_types is [] - member_state = yield self._get_state_for_groups_using_cache( - # we set filtered_types=None as member_state only ever contain members. - groups, self._state_group_members_cache, member_types, None, + + member_state, incomplete_groups_m, = ( + yield self._get_state_for_groups_using_cache( + groups, self._state_group_members_cache, + state_filter=member_filter, + ) ) - state = non_member_state + state = dict(non_member_state) for group in groups: state[group].update(member_state[group]) + # Now fetch any missing groups from the database + + incomplete_groups = incomplete_groups_m | incomplete_groups_nm + + if not incomplete_groups: + defer.returnValue(state) + + cache_sequence_nm = self._state_group_cache.sequence + cache_sequence_m = self._state_group_members_cache.sequence + + # Help the cache hit ratio by expanding the filter a bit + db_state_filter = state_filter.return_expanded() + + group_to_state_dict = yield self._get_state_groups_from_groups( + list(incomplete_groups), + state_filter=db_state_filter, + ) + + # Now lets update the caches + self._insert_into_cache( + group_to_state_dict, + db_state_filter, + cache_seq_num_members=cache_sequence_m, + cache_seq_num_non_members=cache_sequence_nm, + ) + + # And finally update the result dict, by filtering out any extra + # stuff we pulled out of the database. + for group, group_state_dict in iteritems(group_to_state_dict): + # We just replace any existing entries, as we will have loaded + # everything we need from the database anyway. + state[group] = state_filter.filter_state(group_state_dict) + defer.returnValue(state) - @defer.inlineCallbacks def _get_state_for_groups_using_cache( - self, groups, cache, types=None, filtered_types=None + self, groups, cache, state_filter, ): """Gets the state at each of a list of state groups, optionally filtering by type/state_key, querying from a specific cache. @@ -790,89 +987,85 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): cache (DictionaryCache): the cache of group ids to state dicts which we will pass through - either the normal state cache or the specific members state cache. - types (None|iterable[(str, None|str)]): - indicates the state type/keys required. If None, the whole - state is fetched and returned. - - Otherwise, each entry should be a `(type, state_key)` tuple to - include in the response. A `state_key` of None is a wildcard - meaning that we require all state with that type. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. 
Other types of events are returned unfiltered.
-                If None, `types` filtering is applied to all events.
+            state_filter (StateFilter): The state filter used to fetch state
+                from the database.

        Returns:
-            Deferred[dict[int, dict[tuple[str, str], str]]]:
-                dict of state_group_id -> (dict of (type, state_key) -> event id)
+            tuple[dict[int, dict[tuple[str, str], str]], set[int]]: Tuple of
+            dict of state_group_id -> (dict of (type, state_key) -> event id)
+            of entries in the cache, and the state group ids either missing
+            from the cache or incomplete.
        """
-        if types:
-            types = frozenset(types)

        results = {}
-        missing_groups = []
-        if types is not None:
-            for group in set(groups):
-                state_dict_ids, got_all = self._get_some_state_from_cache(
-                    cache, group, types, filtered_types
-                )
-                results[group] = state_dict_ids
+        incomplete_groups = set()
+        for group in set(groups):
+            state_dict_ids, got_all = self._get_state_for_group_using_cache(
+                cache, group, state_filter
+            )
+            results[group] = state_dict_ids

-                if not got_all:
-                    missing_groups.append(group)
-        else:
-            for group in set(groups):
-                state_dict_ids, got_all = self._get_all_state_from_cache(
-                    cache, group
-                )
+            if not got_all:
+                incomplete_groups.add(group)

-                results[group] = state_dict_ids
+        return results, incomplete_groups

-                if not got_all:
-                    missing_groups.append(group)
+    def _insert_into_cache(self, group_to_state_dict, state_filter,
+                           cache_seq_num_members, cache_seq_num_non_members):
+        """Inserts results from querying the database into the relevant cache.

-        if missing_groups:
-            # Okay, so we have some missing_types, let's fetch them.
-            cache_seq_num = cache.sequence
+        Args:
+            group_to_state_dict (dict): The new entries pulled from the database.
+                Map from state group to state dict
+            state_filter (StateFilter): The state filter used to fetch state
+                from the database.
+            cache_seq_num_members (int): Sequence number of member cache since
+                last lookup in cache
+            cache_seq_num_non_members (int): Sequence number of non-member cache
+                since last lookup in cache
+        """

-            # the DictionaryCache knows if it has *all* the state, but
-            # does not know if it has all of the keys of a particular type,
-            # which makes wildcard lookups expensive unless we have a complete
-            # cache. Hence, if we are doing a wildcard lookup, populate the
-            # cache fully so that we can do an efficient lookup next time.
+        # We need to work out which types we've fetched from the DB for the
+        # member vs non-member caches. This should be as accurate as possible,
+        # but can be an underestimate (e.g. when we have wild cards)

-            if filtered_types or (types and any(k is None for (t, k) in types)):
-                types_to_fetch = None
-            else:
-                types_to_fetch = types
+        member_filter, non_member_filter = state_filter.get_member_split()
+        if member_filter.is_full():
+            # We fetched all member events
+            member_types = None
+        else:
+            # `concrete_types()` will only return a subset when there are wild
+            # cards in the filter, but that's fine.
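+            # (e.g. [("m.room.member", "@alice:example.com")], where the user
+            # ID here is purely illustrative)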
+ member_types = member_filter.concrete_types() - group_to_state_dict = yield self._get_state_groups_from_groups( - missing_groups, types_to_fetch, cache == self._state_group_members_cache, - ) + if non_member_filter.is_full(): + # We fetched all non member events + non_member_types = None + else: + non_member_types = non_member_filter.concrete_types() + + for group, group_state_dict in iteritems(group_to_state_dict): + state_dict_members = {} + state_dict_non_members = {} - for group, group_state_dict in iteritems(group_to_state_dict): - state_dict = results[group] - - # update the result, filtering by `types`. - if types: - for k, v in iteritems(group_state_dict): - (typ, _) = k - if ( - (k in types or (typ, None) in types) or - (filtered_types and typ not in filtered_types) - ): - state_dict[k] = v + for k, v in iteritems(group_state_dict): + if k[0] == EventTypes.Member: + state_dict_members[k] = v else: - state_dict.update(group_state_dict) - - # update the cache with all the things we fetched from the - # database. - cache.update( - cache_seq_num, - key=group, - value=group_state_dict, - fetched_keys=types_to_fetch, - ) + state_dict_non_members[k] = v - defer.returnValue(results) + self._state_group_members_cache.update( + cache_seq_num_members, + key=group, + value=state_dict_members, + fetched_keys=member_types, + ) + + self._state_group_cache.update( + cache_seq_num_non_members, + key=group, + value=state_dict_non_members, + fetched_keys=non_member_types, + ) def store_state_group(self, event_id, room_id, prev_group, delta_ids, current_state_ids): @@ -1181,12 +1374,12 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): continue prev_state = self._get_state_groups_from_groups_txn( - txn, [prev_group], types=None + txn, [prev_group], ) prev_state = prev_state[prev_group] curr_state = self._get_state_groups_from_groups_txn( - txn, [state_group], types=None + txn, [state_group], ) curr_state = curr_state[state_group] diff --git a/synapse/visibility.py b/synapse/visibility.py index 43f48196be..0281a7c919 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -23,6 +23,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.events.utils import prune_event +from synapse.storage.state import StateFilter from synapse.types import get_domain_from_id logger = logging.getLogger(__name__) @@ -72,7 +73,7 @@ def filter_events_for_client(store, user_id, events, is_peeking=False, ) event_id_to_state = yield store.get_state_for_events( frozenset(e.event_id for e in events), - types=types, + state_filter=StateFilter.from_types(types), ) ignore_dict_content = yield store.get_global_account_data_by_type_for_user( @@ -273,8 +274,8 @@ def filter_events_for_server(store, server_name, events): # need to check membership (as we know the server is in the room). event_to_state_ids = yield store.get_state_ids_for_events( frozenset(e.event_id for e in events), - types=( - (EventTypes.RoomHistoryVisibility, ""), + state_filter=StateFilter.from_types( + types=((EventTypes.RoomHistoryVisibility, ""),), ) ) @@ -314,9 +315,11 @@ def filter_events_for_server(store, server_name, events): # of the history vis and membership state at those events. 
event_to_state_ids = yield store.get_state_ids_for_events( frozenset(e.event_id for e in events), - types=( - (EventTypes.RoomHistoryVisibility, ""), - (EventTypes.Member, None), + state_filter=StateFilter.from_types( + types=( + (EventTypes.RoomHistoryVisibility, ""), + (EventTypes.Member, None), + ), ) ) diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index b9c5b39d59..086a39d834 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -18,6 +18,7 @@ import logging from twisted.internet import defer from synapse.api.constants import EventTypes, Membership +from synapse.storage.state import StateFilter from synapse.types import RoomID, UserID import tests.unittest @@ -148,7 +149,7 @@ class StateStoreTestCase(tests.unittest.TestCase): # check we get the full state as of the final event state = yield self.store.get_state_for_event( - e5.event_id, None, filtered_types=None + e5.event_id, ) self.assertIsNotNone(e4) @@ -166,33 +167,35 @@ class StateStoreTestCase(tests.unittest.TestCase): # check we can filter to the m.room.name event (with a '' state key) state = yield self.store.get_state_for_event( - e5.event_id, [(EventTypes.Name, '')], filtered_types=None + e5.event_id, StateFilter.from_types([(EventTypes.Name, '')]) ) self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state) # check we can filter to the m.room.name event (with a wildcard None state key) state = yield self.store.get_state_for_event( - e5.event_id, [(EventTypes.Name, None)], filtered_types=None + e5.event_id, StateFilter.from_types([(EventTypes.Name, None)]) ) self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state) # check we can grab the m.room.member events (with a wildcard None state key) state = yield self.store.get_state_for_event( - e5.event_id, [(EventTypes.Member, None)], filtered_types=None + e5.event_id, StateFilter.from_types([(EventTypes.Member, None)]) ) self.assertStateMapEqual( {(e3.type, e3.state_key): e3, (e5.type, e5.state_key): e5}, state ) - # check we can use filtered_types to grab a specific room member - # without filtering out the other event types + # check we can grab a specific room member without filtering out the + # other event types state = yield self.store.get_state_for_event( e5.event_id, - [(EventTypes.Member, self.u_alice.to_string())], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: {self.u_alice.to_string()}}, + include_others=True, + ) ) self.assertStateMapEqual( @@ -204,10 +207,12 @@ class StateStoreTestCase(tests.unittest.TestCase): state, ) - # check that types=[], filtered_types=[EventTypes.Member] - # doesn't return all members + # check that we can grab everything except members state = yield self.store.get_state_for_event( - e5.event_id, [], filtered_types=[EventTypes.Member] + e5.event_id, state_filter=StateFilter( + types={EventTypes.Member: set()}, + include_others=True, + ), ) self.assertStateMapEqual( @@ -215,16 +220,21 @@ class StateStoreTestCase(tests.unittest.TestCase): ) ####################################################### - # _get_some_state_from_cache tests against a full cache + # _get_state_for_group_using_cache tests against a full cache ####################################################### room_id = self.room.to_string() group_ids = yield self.store.get_state_groups_ids(room_id, [e5.event_id]) group = list(group_ids.keys())[0] - # test _get_some_state_from_cache correctly filters out members with types=[] - (state_dict, is_all) = yield 
self.store._get_some_state_from_cache( - self.store._state_group_cache, group, [], filtered_types=[EventTypes.Member] + # test _get_state_for_group_using_cache correctly filters out members + # with types=[] + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( + self.store._state_group_cache, group, + state_filter=StateFilter( + types={EventTypes.Member: set()}, + include_others=True, + ), ) self.assertEqual(is_all, True) @@ -236,22 +246,27 @@ class StateStoreTestCase(tests.unittest.TestCase): state_dict, ) - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_members_cache, group, - [], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: set()}, + include_others=True, + ), ) self.assertEqual(is_all, True) self.assertDictEqual({}, state_dict) - # test _get_some_state_from_cache correctly filters in members with wildcard types - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + # test _get_state_for_group_using_cache correctly filters in members + # with wildcard types + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_cache, group, - [(EventTypes.Member, None)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: None}, + include_others=True, + ), ) self.assertEqual(is_all, True) @@ -263,11 +278,13 @@ class StateStoreTestCase(tests.unittest.TestCase): state_dict, ) - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_members_cache, group, - [(EventTypes.Member, None)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: None}, + include_others=True, + ), ) self.assertEqual(is_all, True) @@ -280,12 +297,15 @@ class StateStoreTestCase(tests.unittest.TestCase): state_dict, ) - # test _get_some_state_from_cache correctly filters in members with specific types - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + # test _get_state_for_group_using_cache correctly filters in members + # with specific types + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_cache, group, - [(EventTypes.Member, e5.state_key)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: {e5.state_key}}, + include_others=True, + ), ) self.assertEqual(is_all, True) @@ -297,23 +317,27 @@ class StateStoreTestCase(tests.unittest.TestCase): state_dict, ) - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_members_cache, group, - [(EventTypes.Member, e5.state_key)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: {e5.state_key}}, + include_others=True, + ), ) self.assertEqual(is_all, True) self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict) - # test _get_some_state_from_cache correctly filters in members with specific types - # and no filtered_types - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + # test _get_state_for_group_using_cache correctly filters in members + # with specific types + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( 
self.store._state_group_members_cache, group, - [(EventTypes.Member, e5.state_key)], - filtered_types=None, + state_filter=StateFilter( + types={EventTypes.Member: {e5.state_key}}, + include_others=False, + ), ) self.assertEqual(is_all, True) @@ -357,42 +381,54 @@ class StateStoreTestCase(tests.unittest.TestCase): ############################################ # test that things work with a partial cache - # test _get_some_state_from_cache correctly filters out members with types=[] + # test _get_state_for_group_using_cache correctly filters out members + # with types=[] room_id = self.room.to_string() - (state_dict, is_all) = yield self.store._get_some_state_from_cache( - self.store._state_group_cache, group, [], filtered_types=[EventTypes.Member] + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( + self.store._state_group_cache, group, + state_filter=StateFilter( + types={EventTypes.Member: set()}, + include_others=True, + ), ) self.assertEqual(is_all, False) self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) room_id = self.room.to_string() - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_members_cache, group, - [], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: set()}, + include_others=True, + ), ) self.assertEqual(is_all, True) self.assertDictEqual({}, state_dict) - # test _get_some_state_from_cache correctly filters in members wildcard types - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + # test _get_state_for_group_using_cache correctly filters in members + # wildcard types + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_cache, group, - [(EventTypes.Member, None)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: None}, + include_others=True, + ), ) self.assertEqual(is_all, False) self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_members_cache, group, - [(EventTypes.Member, None)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: None}, + include_others=True, + ), ) self.assertEqual(is_all, True) @@ -404,44 +440,53 @@ class StateStoreTestCase(tests.unittest.TestCase): state_dict, ) - # test _get_some_state_from_cache correctly filters in members with specific types - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + # test _get_state_for_group_using_cache correctly filters in members + # with specific types + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_cache, group, - [(EventTypes.Member, e5.state_key)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: {e5.state_key}}, + include_others=True, + ), ) self.assertEqual(is_all, False) self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_members_cache, group, - [(EventTypes.Member, e5.state_key)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + 
types={EventTypes.Member: {e5.state_key}}, + include_others=True, + ), ) self.assertEqual(is_all, True) self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict) - # test _get_some_state_from_cache correctly filters in members with specific types - # and no filtered_types - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + # test _get_state_for_group_using_cache correctly filters in members + # with specific types + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_cache, group, - [(EventTypes.Member, e5.state_key)], - filtered_types=None, + state_filter=StateFilter( + types={EventTypes.Member: {e5.state_key}}, + include_others=False, + ), ) self.assertEqual(is_all, False) self.assertDictEqual({}, state_dict) - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_members_cache, group, - [(EventTypes.Member, e5.state_key)], - filtered_types=None, + state_filter=StateFilter( + types={EventTypes.Member: {e5.state_key}}, + include_others=False, + ), ) self.assertEqual(is_all, True) -- cgit 1.5.1 From 77d70a7646bd30315393b9aec44e452d5739a266 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 26 Oct 2018 22:05:22 +1100 Subject: Port register_new_matrix_user to Python 3 and add tests (#4085) --- changelog.d/4085.feature | 1 + scripts/register_new_matrix_user | 204 +------------------------ synapse/_scripts/__init__.py | 0 synapse/_scripts/register_new_matrix_user.py | 215 +++++++++++++++++++++++++++ tests/scripts/__init__.py | 0 tests/scripts/test_new_matrix_user.py | 160 ++++++++++++++++++++ 6 files changed, 378 insertions(+), 202 deletions(-) create mode 100644 changelog.d/4085.feature create mode 100644 synapse/_scripts/__init__.py create mode 100644 synapse/_scripts/register_new_matrix_user.py create mode 100644 tests/scripts/__init__.py create mode 100644 tests/scripts/test_new_matrix_user.py (limited to 'synapse') diff --git a/changelog.d/4085.feature b/changelog.d/4085.feature new file mode 100644 index 0000000000..4bd3ddcf2c --- /dev/null +++ b/changelog.d/4085.feature @@ -0,0 +1 @@ +The register_new_matrix_user script is now ported to Python 3. diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user index 89143c5d59..b450712ab7 100755 --- a/scripts/register_new_matrix_user +++ b/scripts/register_new_matrix_user @@ -16,207 +16,7 @@ from __future__ import print_function -import argparse -import getpass -import hashlib -import hmac -import json -import sys -import urllib2 - -from six import input - -import yaml - - -def request_registration(user, password, server_location, shared_secret, admin=False): - req = urllib2.Request( - "%s/_matrix/client/r0/admin/register" % (server_location,), - headers={'Content-Type': 'application/json'}, - ) - - try: - if sys.version_info[:3] >= (2, 7, 9): - # As of version 2.7.9, urllib2 now checks SSL certs - import ssl - - f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23)) - else: - f = urllib2.urlopen(req) - body = f.read() - f.close() - nonce = json.loads(body)["nonce"] - except urllib2.HTTPError as e: - print("ERROR! 
Received %d %s" % (e.code, e.reason)) - if 400 <= e.code < 500: - if e.info().type == "application/json": - resp = json.load(e) - if "error" in resp: - print(resp["error"]) - sys.exit(1) - - mac = hmac.new(key=shared_secret, digestmod=hashlib.sha1) - - mac.update(nonce) - mac.update("\x00") - mac.update(user) - mac.update("\x00") - mac.update(password) - mac.update("\x00") - mac.update("admin" if admin else "notadmin") - - mac = mac.hexdigest() - - data = { - "nonce": nonce, - "username": user, - "password": password, - "mac": mac, - "admin": admin, - } - - server_location = server_location.rstrip("/") - - print("Sending registration request...") - - req = urllib2.Request( - "%s/_matrix/client/r0/admin/register" % (server_location,), - data=json.dumps(data), - headers={'Content-Type': 'application/json'}, - ) - try: - if sys.version_info[:3] >= (2, 7, 9): - # As of version 2.7.9, urllib2 now checks SSL certs - import ssl - - f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23)) - else: - f = urllib2.urlopen(req) - f.read() - f.close() - print("Success.") - except urllib2.HTTPError as e: - print("ERROR! Received %d %s" % (e.code, e.reason)) - if 400 <= e.code < 500: - if e.info().type == "application/json": - resp = json.load(e) - if "error" in resp: - print(resp["error"]) - sys.exit(1) - - -def register_new_user(user, password, server_location, shared_secret, admin): - if not user: - try: - default_user = getpass.getuser() - except Exception: - default_user = None - - if default_user: - user = input("New user localpart [%s]: " % (default_user,)) - if not user: - user = default_user - else: - user = input("New user localpart: ") - - if not user: - print("Invalid user name") - sys.exit(1) - - if not password: - password = getpass.getpass("Password: ") - - if not password: - print("Password cannot be blank.") - sys.exit(1) - - confirm_password = getpass.getpass("Confirm password: ") - - if password != confirm_password: - print("Passwords do not match") - sys.exit(1) - - if admin is None: - admin = input("Make admin [no]: ") - if admin in ("y", "yes", "true"): - admin = True - else: - admin = False - - request_registration(user, password, server_location, shared_secret, bool(admin)) - +from synapse._scripts.register_new_matrix_user import main if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="Used to register new users with a given home server when" - " registration has been disabled. The home server must be" - " configured with the 'registration_shared_secret' option" - " set." - ) - parser.add_argument( - "-u", - "--user", - default=None, - help="Local part of the new user. Will prompt if omitted.", - ) - parser.add_argument( - "-p", - "--password", - default=None, - help="New password for user. Will prompt if omitted.", - ) - admin_group = parser.add_mutually_exclusive_group() - admin_group.add_argument( - "-a", - "--admin", - action="store_true", - help=( - "Register new user as an admin. " - "Will prompt if --no-admin is not set either." - ), - ) - admin_group.add_argument( - "--no-admin", - action="store_true", - help=( - "Register new user as a regular user. " - "Will prompt if --admin is not set either." - ), - ) - - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument( - "-c", - "--config", - type=argparse.FileType('r'), - help="Path to server config file. Used to read in shared secret.", - ) - - group.add_argument( - "-k", "--shared-secret", help="Shared secret as defined in server config file." 
- ) - - parser.add_argument( - "server_url", - default="https://localhost:8448", - nargs='?', - help="URL to use to talk to the home server. Defaults to " - " 'https://localhost:8448'.", - ) - - args = parser.parse_args() - - if "config" in args and args.config: - config = yaml.safe_load(args.config) - secret = config.get("registration_shared_secret", None) - if not secret: - print("No 'registration_shared_secret' defined in config.") - sys.exit(1) - else: - secret = args.shared_secret - - admin = None - if args.admin or args.no_admin: - admin = args.admin - - register_new_user(args.user, args.password, args.server_url, secret, admin) + main() diff --git a/synapse/_scripts/__init__.py b/synapse/_scripts/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py new file mode 100644 index 0000000000..70cecde486 --- /dev/null +++ b/synapse/_scripts/register_new_matrix_user.py @@ -0,0 +1,215 @@ +# -*- coding: utf-8 -*- +# Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2018 New Vector +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import argparse +import getpass +import hashlib +import hmac +import logging +import sys + +from six.moves import input + +import requests as _requests +import yaml + + +def request_registration( + user, + password, + server_location, + shared_secret, + admin=False, + requests=_requests, + _print=print, + exit=sys.exit, +): + + url = "%s/_matrix/client/r0/admin/register" % (server_location,) + + # Get the nonce + r = requests.get(url, verify=False) + + if r.status_code is not 200: + _print("ERROR! Received %d %s" % (r.status_code, r.reason)) + if 400 <= r.status_code < 500: + try: + _print(r.json()["error"]) + except Exception: + pass + return exit(1) + + nonce = r.json()["nonce"] + + mac = hmac.new(key=shared_secret.encode('utf8'), digestmod=hashlib.sha1) + + mac.update(nonce.encode('utf8')) + mac.update(b"\x00") + mac.update(user.encode('utf8')) + mac.update(b"\x00") + mac.update(password.encode('utf8')) + mac.update(b"\x00") + mac.update(b"admin" if admin else b"notadmin") + + mac = mac.hexdigest() + + data = { + "nonce": nonce, + "username": user, + "password": password, + "mac": mac, + "admin": admin, + } + + _print("Sending registration request...") + r = requests.post(url, json=data, verify=False) + + if r.status_code is not 200: + _print("ERROR! 
Received %d %s" % (r.status_code, r.reason)) + if 400 <= r.status_code < 500: + try: + _print(r.json()["error"]) + except Exception: + pass + return exit(1) + + _print("Success!") + + +def register_new_user(user, password, server_location, shared_secret, admin): + if not user: + try: + default_user = getpass.getuser() + except Exception: + default_user = None + + if default_user: + user = input("New user localpart [%s]: " % (default_user,)) + if not user: + user = default_user + else: + user = input("New user localpart: ") + + if not user: + print("Invalid user name") + sys.exit(1) + + if not password: + password = getpass.getpass("Password: ") + + if not password: + print("Password cannot be blank.") + sys.exit(1) + + confirm_password = getpass.getpass("Confirm password: ") + + if password != confirm_password: + print("Passwords do not match") + sys.exit(1) + + if admin is None: + admin = input("Make admin [no]: ") + if admin in ("y", "yes", "true"): + admin = True + else: + admin = False + + request_registration(user, password, server_location, shared_secret, bool(admin)) + + +def main(): + + logging.captureWarnings(True) + + parser = argparse.ArgumentParser( + description="Used to register new users with a given home server when" + " registration has been disabled. The home server must be" + " configured with the 'registration_shared_secret' option" + " set." + ) + parser.add_argument( + "-u", + "--user", + default=None, + help="Local part of the new user. Will prompt if omitted.", + ) + parser.add_argument( + "-p", + "--password", + default=None, + help="New password for user. Will prompt if omitted.", + ) + admin_group = parser.add_mutually_exclusive_group() + admin_group.add_argument( + "-a", + "--admin", + action="store_true", + help=( + "Register new user as an admin. " + "Will prompt if --no-admin is not set either." + ), + ) + admin_group.add_argument( + "--no-admin", + action="store_true", + help=( + "Register new user as a regular user. " + "Will prompt if --admin is not set either." + ), + ) + + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument( + "-c", + "--config", + type=argparse.FileType('r'), + help="Path to server config file. Used to read in shared secret.", + ) + + group.add_argument( + "-k", "--shared-secret", help="Shared secret as defined in server config file." + ) + + parser.add_argument( + "server_url", + default="https://localhost:8448", + nargs='?', + help="URL to use to talk to the home server. 
Defaults to " + " 'https://localhost:8448'.", + ) + + args = parser.parse_args() + + if "config" in args and args.config: + config = yaml.safe_load(args.config) + secret = config.get("registration_shared_secret", None) + if not secret: + print("No 'registration_shared_secret' defined in config.") + sys.exit(1) + else: + secret = args.shared_secret + + admin = None + if args.admin or args.no_admin: + admin = args.admin + + register_new_user(args.user, args.password, args.server_url, secret, admin) + + +if __name__ == "__main__": + main() diff --git a/tests/scripts/__init__.py b/tests/scripts/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/scripts/test_new_matrix_user.py b/tests/scripts/test_new_matrix_user.py new file mode 100644 index 0000000000..6f56893f5e --- /dev/null +++ b/tests/scripts/test_new_matrix_user.py @@ -0,0 +1,160 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from mock import Mock + +from synapse._scripts.register_new_matrix_user import request_registration + +from tests.unittest import TestCase + + +class RegisterTestCase(TestCase): + def test_success(self): + """ + The script will fetch a nonce, and then generate a MAC with it, and then + post that MAC. + """ + + def get(url, verify=None): + r = Mock() + r.status_code = 200 + r.json = lambda: {"nonce": "a"} + return r + + def post(url, json=None, verify=None): + # Make sure we are sent the correct info + self.assertEqual(json["username"], "user") + self.assertEqual(json["password"], "pass") + self.assertEqual(json["nonce"], "a") + # We want a 40-char hex MAC + self.assertEqual(len(json["mac"]), 40) + + r = Mock() + r.status_code = 200 + return r + + requests = Mock() + requests.get = get + requests.post = post + + # The fake stdout will be written here + out = [] + err_code = [] + + request_registration( + "user", + "pass", + "matrix.org", + "shared", + admin=False, + requests=requests, + _print=out.append, + exit=err_code.append, + ) + + # We should get the success message making sure everything is OK. + self.assertIn("Success!", out) + + # sys.exit shouldn't have been called. + self.assertEqual(err_code, []) + + def test_failure_nonce(self): + """ + If the script fails to fetch a nonce, it throws an error and quits. + """ + + def get(url, verify=None): + r = Mock() + r.status_code = 404 + r.reason = "Not Found" + r.json = lambda: {"not": "error"} + return r + + requests = Mock() + requests.get = get + + # The fake stdout will be written here + out = [] + err_code = [] + + request_registration( + "user", + "pass", + "matrix.org", + "shared", + admin=False, + requests=requests, + _print=out.append, + exit=err_code.append, + ) + + # Exit was called + self.assertEqual(err_code, [1]) + + # We got an error message + self.assertIn("ERROR! 
Received 404 Not Found", out) + self.assertNotIn("Success!", out) + + def test_failure_post(self): + """ + The script will fetch a nonce, and then if the final POST fails, will + report an error and quit. + """ + + def get(url, verify=None): + r = Mock() + r.status_code = 200 + r.json = lambda: {"nonce": "a"} + return r + + def post(url, json=None, verify=None): + # Make sure we are sent the correct info + self.assertEqual(json["username"], "user") + self.assertEqual(json["password"], "pass") + self.assertEqual(json["nonce"], "a") + # We want a 40-char hex MAC + self.assertEqual(len(json["mac"]), 40) + + r = Mock() + # Then 500 because we're jerks + r.status_code = 500 + r.reason = "Broken" + return r + + requests = Mock() + requests.get = get + requests.post = post + + # The fake stdout will be written here + out = [] + err_code = [] + + request_registration( + "user", + "pass", + "matrix.org", + "shared", + admin=False, + requests=requests, + _print=out.append, + exit=err_code.append, + ) + + # Exit was called + self.assertEqual(err_code, [1]) + + # We got an error message + self.assertIn("ERROR! Received 500 Broken", out) + self.assertNotIn("Success!", out) -- cgit 1.5.1 From 2b791865c454f3bd08aa16431ef506305f712609 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 29 Oct 2018 21:52:52 +1100 Subject: version bump --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/__init__.py b/synapse/__init__.py index 1ddbbbebfb..c8377df7b2 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.33.7" +__version__ = "0.33.8rc1" -- cgit 1.5.1 From 086e1a8f3e873351a749cb8b341cd2adbe701049 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 31 Oct 2018 04:33:41 +1100 Subject: Fix search 500ing (#4122) --- changelog.d/4122.bugfix | 1 + synapse/handlers/search.py | 8 ++- tests/rest/client/v1/test_rooms.py | 106 ++++++++++++++++++++++++++++++++++++- 3 files changed, 112 insertions(+), 3 deletions(-) create mode 100644 changelog.d/4122.bugfix (limited to 'synapse') diff --git a/changelog.d/4122.bugfix b/changelog.d/4122.bugfix new file mode 100644 index 0000000000..66dcfb18b9 --- /dev/null +++ b/changelog.d/4122.bugfix @@ -0,0 +1 @@ +Searches that request profile info now no longer fail with a 500. 
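The patch below fixes the 500 by moving the search handler onto the StateFilter API introduced earlier in this series. As a rough sketch of how the old and new calling conventions line up -- inferred only from the call sites visible in these diffs, so treat the exact constructor signature as an assumption, and the user ID as a placeholder:

    from synapse.api.constants import EventTypes
    from synapse.storage.state import StateFilter

    # Old convention: fetch Alice's membership event, but let every other
    # event type through unfiltered.
    #     types=[(EventTypes.Member, "@alice:example.com")],
    #     filtered_types=[EventTypes.Member],
    #
    # New convention: the same selection expressed as a single StateFilter.
    sf = StateFilter(
        types={EventTypes.Member: {"@alice:example.com"}},  # only Alice's membership
        include_others=True,  # keep event types not mentioned in `types`
    )

    # When everything not listed should be dropped, from_types() is the
    # shorthand -- a None state key acts as a wildcard for that event type.
    # This is the form the search handler below uses to fetch the senders'
    # membership events.
    members_only = StateFilter.from_types([(EventTypes.Member, None)])
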
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 0c1d52fd11..80e7b15de8 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -24,6 +24,7 @@ from synapse.api.constants import EventTypes, Membership from synapse.api.errors import SynapseError from synapse.api.filtering import Filter from synapse.events.utils import serialize_event +from synapse.storage.state import StateFilter from synapse.visibility import filter_events_for_client from ._base import BaseHandler @@ -324,9 +325,12 @@ class SearchHandler(BaseHandler): else: last_event_id = event.event_id + state_filter = StateFilter.from_types( + [(EventTypes.Member, sender) for sender in senders] + ) + state = yield self.store.get_state_for_event( - last_event_id, - types=[(EventTypes.Member, sender) for sender in senders] + last_event_id, state_filter ) res["profile_info"] = { diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 359f7777ff..a824be9a62 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -23,7 +23,7 @@ from six.moves.urllib import parse as urlparse from twisted.internet import defer from synapse.api.constants import Membership -from synapse.rest.client.v1 import room +from synapse.rest.client.v1 import admin, login, room from tests import unittest @@ -799,3 +799,107 @@ class RoomMessageListTestCase(RoomBase): self.assertEquals(token, channel.json_body['start']) self.assertTrue("chunk" in channel.json_body) self.assertTrue("end" in channel.json_body) + + +class RoomSearchTestCase(unittest.HomeserverTestCase): + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + user_id = True + hijack_auth = False + + def prepare(self, reactor, clock, hs): + + # Register the user who does the searching + self.user_id = self.register_user("user", "pass") + self.access_token = self.login("user", "pass") + + # Register the user who sends the message + self.other_user_id = self.register_user("otheruser", "pass") + self.other_access_token = self.login("otheruser", "pass") + + # Create a room + self.room = self.helper.create_room_as(self.user_id, tok=self.access_token) + + # Invite the other person + self.helper.invite( + room=self.room, + src=self.user_id, + tok=self.access_token, + targ=self.other_user_id, + ) + + # The other user joins + self.helper.join( + room=self.room, user=self.other_user_id, tok=self.other_access_token + ) + + def test_finds_message(self): + """ + The search functionality will search for content in messages if asked to + do so. + """ + # The other user sends some messages + self.helper.send(self.room, body="Hi!", tok=self.other_access_token) + self.helper.send(self.room, body="There!", tok=self.other_access_token) + + request, channel = self.make_request( + "POST", + "/search?access_token=%s" % (self.access_token,), + { + "search_categories": { + "room_events": {"keys": ["content.body"], "search_term": "Hi"} + } + }, + ) + self.render(request) + + # Check we get the results we expect -- one search result, of the sent + # messages + self.assertEqual(channel.code, 200) + results = channel.json_body["search_categories"]["room_events"] + self.assertEqual(results["count"], 1) + self.assertEqual(results["results"][0]["result"]["content"]["body"], "Hi!") + + # No context was requested, so we should get none. 
+ self.assertEqual(results["results"][0]["context"], {}) + + def test_include_context(self): + """ + When event_context includes include_profile, profile information will be + included in the search response. + """ + # The other user sends some messages + self.helper.send(self.room, body="Hi!", tok=self.other_access_token) + self.helper.send(self.room, body="There!", tok=self.other_access_token) + + request, channel = self.make_request( + "POST", + "/search?access_token=%s" % (self.access_token,), + { + "search_categories": { + "room_events": { + "keys": ["content.body"], + "search_term": "Hi", + "event_context": {"include_profile": True}, + } + } + }, + ) + self.render(request) + + # Check we get the results we expect -- one search result, of the sent + # messages + self.assertEqual(channel.code, 200) + results = channel.json_body["search_categories"]["room_events"] + self.assertEqual(results["count"], 1) + self.assertEqual(results["results"][0]["result"]["content"]["body"], "Hi!") + + # We should get context info, like the two users, and the display names. + context = results["results"][0]["context"] + self.assertEqual(len(context["profile_info"].keys()), 2) + self.assertEqual( + context["profile_info"][self.other_user_id]["displayname"], "otheruser" + ) -- cgit 1.5.1 From 67c1924899721e57093470d2f449f745a983b1d9 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 31 Oct 2018 06:27:05 +1100 Subject: version bump --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/__init__.py b/synapse/__init__.py index c8377df7b2..97a57a7ac1 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.33.8rc1" +__version__ = "0.33.8rc2" -- cgit 1.5.1 From aa98e3889667bbbd4186f25336e1a9a5666113d2 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 1 Nov 2018 21:28:35 +1100 Subject: version --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/__init__.py b/synapse/__init__.py index 97a57a7ac1..89ea9a9775 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.33.8rc2" +__version__ = "0.33.8" -- cgit 1.5.1
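For reference, the shared-secret registration flow ported in #4085 computes its MAC identically in the old and new versions of the script: an HMAC-SHA1, keyed with the server's registration_shared_secret, over the nonce, localpart, password and admin flag, NUL-separated. A minimal standalone sketch of that construction -- the helper name and example values are illustrative, not part of the patches:

    import hashlib
    import hmac

    def registration_mac(shared_secret, nonce, user, password, admin=False):
        # Feed the HMAC nonce\x00user\x00password\x00 followed by the admin
        # flag, mirroring the sequence of update() calls in
        # request_registration() above.
        mac = hmac.new(key=shared_secret.encode("utf8"), digestmod=hashlib.sha1)
        for part in (nonce, user, password):
            mac.update(part.encode("utf8"))
            mac.update(b"\x00")
        mac.update(b"admin" if admin else b"notadmin")
        return mac.hexdigest()

    # SHA-1 yields a 40-character hex digest, which is what
    # tests/scripts/test_new_matrix_user.py asserts on.
    print(len(registration_mac("shared", "a", "user", "pass")))  # 40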