From 0b03a9770829247055fe8eaf66c24bb1892a3c50 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Tue, 26 Sep 2017 17:56:41 +0100
Subject: Add module_loader.py

---
 synapse/util/module_loader.py | 41 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)
 create mode 100644 synapse/util/module_loader.py

(limited to 'synapse/util')

diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py
new file mode 100644
index 0000000000..b4464790ee
--- /dev/null
+++ b/synapse/util/module_loader.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import importlib
+
+from synapse.config._base import ConfigError
+
+def load_module(provider):
+    """ Loads a module with its config
+    Takes a dict with keys 'module' (the module name) and 'config'
+    (the config dict).
+
+    Returns:
+        Tuple of (provider class, parsed config object)
+    """
+    # We need to import the module, and then pick the class out of
+    # that, so we split based on the last dot.
+    module, clz = provider['module'].rsplit(".", 1)
+    module = importlib.import_module(module)
+    provider_class = getattr(module, clz)
+
+    try:
+        provider_config = provider_class.parse_config(provider["config"])
+    except Exception as e:
+        raise ConfigError(
+            "Failed to parse config for %r: %r" % (provider['module'], e)
+        )
+
+    return (provider_class, provider_config)
-- cgit 1.5.1

From 9fd086e506ae3cb3db7f1b1c7317c7602a4d71e3 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Tue, 26 Sep 2017 17:59:46 +0100
Subject: unnecessary parens

---
 synapse/util/module_loader.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'synapse/util')

diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py
index b4464790ee..4b51d7a77b 100644
--- a/synapse/util/module_loader.py
+++ b/synapse/util/module_loader.py
@@ -38,4 +38,4 @@ def load_module(provider):
             "Failed to parse config for %r: %r" % (provider['module'], e)
         )
 
-    return (provider_class, provider_config)
+    return provider_class, provider_config
-- cgit 1.5.1

From 8ad5f34908df99804b27bd045fde5b9d5625d784 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Tue, 26 Sep 2017 19:21:41 +0100
Subject: pep8

---
 synapse/util/module_loader.py | 1 +
 1 file changed, 1 insertion(+)

(limited to 'synapse/util')

diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py
index 4b51d7a77b..4288312b8a 100644
--- a/synapse/util/module_loader.py
+++ b/synapse/util/module_loader.py
@@ -17,6 +17,7 @@ import importlib
 
 from synapse.config._base import ConfigError
 
+
 def load_module(provider):
     """ Loads a module with its config
     Takes a dict with keys 'module' (the module name) and 'config'
-- cgit 1.5.1

From 148428ce763978583da2b1d3c435ec321df45855 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Fri, 6 Oct 2017 22:24:28 +0100
Subject: Fix logcontext handling for concurrently_execute

Avoid preserve_context_over_deferred, which is broken.
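Before moving on to the logcontext fixes, here is a usage sketch for the load_module() API added above. The provider class, module path and config keys are invented for illustration; the only contract load_module() relies on is that the 'module' value is a dotted path ending in a class name, and that the class exposes a parse_config callable.

    # Hedged sketch of the contract load_module() expects; nothing below
    # is part of the patches above.
    class ExamplePasswordProvider(object):
        @staticmethod
        def parse_config(config):
            # Validate the raw dict taken from the homeserver config file.
            # Anything raised here is turned into a ConfigError by
            # load_module().
            if "endpoint" not in config:
                raise Exception("'endpoint' is required")
            return config

    # In real use the class lives at the dotted path named by 'module':
    #
    #     provider = {
    #         "module": "my_package.providers.ExamplePasswordProvider",
    #         "config": {"endpoint": "https://auth.example.com"},
    #     }
    #     provider_class, provider_config = load_module(provider)
    #
    # load_module() splits on the last dot, imports my_package.providers,
    # picks the ExamplePasswordProvider attribute off it, and returns the
    # class together with the result of parse_config().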
--- synapse/util/async.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/async.py b/synapse/util/async.py index 1453faf0ef..bb252f75d7 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -19,7 +19,7 @@ from twisted.internet import defer, reactor from .logcontext import ( PreserveLoggingContext, preserve_fn, preserve_context_over_deferred, ) -from synapse.util import unwrapFirstError +from synapse.util import logcontext, unwrapFirstError from contextlib import contextmanager @@ -155,7 +155,7 @@ def concurrently_execute(func, args, limit): except StopIteration: pass - return preserve_context_over_deferred(defer.gatherResults([ + return logcontext.make_deferred_yieldable(defer.gatherResults([ preserve_fn(_concurrently_execute_inner)() for _ in xrange(limit) ], consumeErrors=True)).addErrback(unwrapFirstError) -- cgit 1.5.1 From 3cc852d339ad1cdcb0a435c76b44182bdb81dfe9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 9 Oct 2017 17:44:42 +0100 Subject: Fancy logformatter to format exceptions better This is a bit of an experimental change at this point; the idea is to see if it helps us track down where our stack overflows are coming from by logging the stack when the exception was caught and turned into a Failure. (We'll also need https://github.com/richvdh/twisted/commit/edf27044200e74680ea67c525768e36dc9d9af2b). If we deploy this, we'll be able to enable it via the log config yaml. --- synapse/util/logformatter.py | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 synapse/util/logformatter.py (limited to 'synapse/util') diff --git a/synapse/util/logformatter.py b/synapse/util/logformatter.py new file mode 100644 index 0000000000..60504162e9 --- /dev/null +++ b/synapse/util/logformatter.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import StringIO +import logging +import traceback + + +class LogFormatter(logging.Formatter): + """Log formatter which gives more detail for exceptions + + This is the same as the standard log formatter, except that when logging + exceptions [typically via log.foo("msg", exc_info=1)], it prints the + sequence that led up to the point at which the exception was caught. + (Normally only stack frames between the point the exception was raised and + where it was caught are logged). 
+ """ + def __init__(self, *args, **kwargs): + super(LogFormatter, self).__init__(*args, **kwargs) + + def formatException(self, ei): + sio = StringIO.StringIO() + sio.write("Capture point (most recent call last):\n") + traceback.print_stack(ei[2].tb_frame.f_back, None, sio) + traceback.print_exception(ei[0], ei[1], ei[2], None, sio) + s = sio.getvalue() + sio.close() + if s[-1:] == "\n": + s = s[:-1] + return s -- cgit 1.5.1 From 4fad8efbfb1726c72bdd7cbbacc894b8701efec3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 11 Oct 2017 15:05:05 +0100 Subject: Fix stackoverflow and logcontexts from linearizer 1. make it not blow out the stack when there are more than 50 things waiting for a lock. Fixes https://github.com/matrix-org/synapse/issues/2505. 2. Make it not mess up the log contexts. --- synapse/util/async.py | 24 ++++++++++++++++++++++-- tests/util/test_linearizer.py | 28 ++++++++++++++++++++++++---- 2 files changed, 46 insertions(+), 6 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/async.py b/synapse/util/async.py index bb252f75d7..0fd5b42523 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -203,7 +203,26 @@ class Linearizer(object): except: logger.exception("Unexpected exception in Linearizer") - logger.info("Acquired linearizer lock %r for key %r", self.name, key) + logger.info("Acquired linearizer lock %r for key %r", self.name, + key) + + # if the code holding the lock completes synchronously, then it + # will recursively run the next claimant on the list. That can + # relatively rapidly lead to stack exhaustion. This is essentially + # the same problem as http://twistedmatrix.com/trac/ticket/9304. + # + # In order to break the cycle, we add a cheeky sleep(0) here to + # ensure that we fall back to the reactor between each iteration. + # + # (There's no particular need for it to happen before we return + # the context manager, but it needs to happen while we hold the + # lock, and the context manager's exit code must be synchronous, + # so actually this is the only sensible place. + yield run_on_reactor() + + else: + logger.info("Acquired uncontended linearizer lock %r for key %r", + self.name, key) @contextmanager def _ctx_manager(): @@ -211,7 +230,8 @@ class Linearizer(object): yield finally: logger.info("Releasing linearizer lock %r for key %r", self.name, key) - new_defer.callback(None) + with PreserveLoggingContext(): + new_defer.callback(None) current_d = self.key_to_defer.get(key) if current_d is new_defer: self.key_to_defer.pop(key, None) diff --git a/tests/util/test_linearizer.py b/tests/util/test_linearizer.py index afcba482f9..793a88e462 100644 --- a/tests/util/test_linearizer.py +++ b/tests/util/test_linearizer.py @@ -12,8 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - - +from synapse.util import async, logcontext from tests import unittest from twisted.internet import defer @@ -38,7 +37,28 @@ class LinearizerTestCase(unittest.TestCase): with cm1: self.assertFalse(d2.called) - self.assertTrue(d2.called) - with (yield d2): pass + + def test_lots_of_queued_things(self): + # we have one slow thing, and lots of fast things queued up behind it. + # it should *not* explode the stack. 
+ linearizer = Linearizer() + + @defer.inlineCallbacks + def func(i, sleep=False): + with logcontext.LoggingContext("func(%s)" % i) as lc: + with (yield linearizer.queue("")): + self.assertEqual( + logcontext.LoggingContext.current_context(), lc) + if sleep: + yield async.sleep(0) + + self.assertEqual( + logcontext.LoggingContext.current_context(), lc) + + func(0, sleep=True) + for i in xrange(1, 100): + func(i) + + return func(1000) -- cgit 1.5.1 From f30c4ed2bc2255dc7182bd026fb6437afec735a5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 11 Oct 2017 17:26:17 +0100 Subject: logformatter: fix AttributeError make sure we have the relevant fields before we try to log them. --- synapse/util/logformatter.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/logformatter.py b/synapse/util/logformatter.py index 60504162e9..cdbc4bffd7 100644 --- a/synapse/util/logformatter.py +++ b/synapse/util/logformatter.py @@ -33,9 +33,17 @@ class LogFormatter(logging.Formatter): def formatException(self, ei): sio = StringIO.StringIO() - sio.write("Capture point (most recent call last):\n") - traceback.print_stack(ei[2].tb_frame.f_back, None, sio) - traceback.print_exception(ei[0], ei[1], ei[2], None, sio) + (typ, val, tb) = ei + + # log the stack above the exception capture point if possible, but + # check that we actually have an f_back attribute to work around + # https://twistedmatrix.com/trac/ticket/9305 + + if tb and hasattr(tb.tb_frame, 'f_back'): + sio.write("Capture point (most recent call last):\n") + traceback.print_stack(tb.tb_frame.f_back, None, sio) + + traceback.print_exception(typ, val, tb, None, sio) s = sio.getvalue() sio.close() if s[-1:] == "\n": -- cgit 1.5.1 From 2e9f5ea31a9c66eceb6276c5241cc6537cb0ae4c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 17 Oct 2017 10:59:30 +0100 Subject: Fix logcontext handling for persist_events * don't use preserve_context_over_deferred, which is known broken. * remove a redundant preserve_fn. * add/improve some comments --- synapse/storage/events.py | 24 +++++++++++++++++------- synapse/util/async.py | 5 +++++ 2 files changed, 22 insertions(+), 7 deletions(-) (limited to 'synapse/util') diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 4f0b43c36d..637640ec2a 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -21,7 +21,7 @@ from synapse.events.utils import prune_event from synapse.util.async import ObservableDeferred from synapse.util.logcontext import ( - preserve_fn, PreserveLoggingContext, preserve_context_over_deferred + preserve_fn, PreserveLoggingContext, make_deferred_yieldable ) from synapse.util.logutils import log_function from synapse.util.metrics import Measure @@ -88,13 +88,23 @@ class _EventPeristenceQueue(object): def add_to_queue(self, room_id, events_and_contexts, backfilled): """Add events to the queue, with the given persist_event options. + NB: due to the normal usage pattern of this method, it does *not* + follow the synapse logcontext rules, and leaves the logcontext in + place whether or not the returned deferred is ready. + Args: room_id (str): events_and_contexts (list[(EventBase, EventContext)]): backfilled (bool): + + Returns: + defer.Deferred: a deferred which will resolve once the events are + persisted. Runs its callbacks *without* a logcontext. 
""" queue = self._event_persist_queues.setdefault(room_id, deque()) if queue: + # if the last item in the queue has the same `backfilled` setting, + # we can just add these new events to that item. end_item = queue[-1] if end_item.backfilled == backfilled: end_item.events_and_contexts.extend(events_and_contexts) @@ -113,11 +123,11 @@ class _EventPeristenceQueue(object): def handle_queue(self, room_id, per_item_callback): """Attempts to handle the queue for a room if not already being handled. - The given callback will be invoked with for each item in the queue,1 + The given callback will be invoked with for each item in the queue, of type _EventPersistQueueItem. The per_item_callback will continuously be called with new items, unless the queue becomnes empty. The return value of the function will be given to the deferreds waiting on the item, - exceptions will be passed to the deferres as well. + exceptions will be passed to the deferreds as well. This function should therefore be called whenever anything is added to the queue. @@ -233,7 +243,7 @@ class EventsStore(SQLBaseStore): deferreds = [] for room_id, evs_ctxs in partitioned.iteritems(): - d = preserve_fn(self._event_persist_queue.add_to_queue)( + d = self._event_persist_queue.add_to_queue( room_id, evs_ctxs, backfilled=backfilled, ) @@ -242,7 +252,7 @@ class EventsStore(SQLBaseStore): for room_id in partitioned: self._maybe_start_persisting(room_id) - return preserve_context_over_deferred( + return make_deferred_yieldable( defer.gatherResults(deferreds, consumeErrors=True) ) @@ -267,7 +277,7 @@ class EventsStore(SQLBaseStore): self._maybe_start_persisting(event.room_id) - yield preserve_context_over_deferred(deferred) + yield make_deferred_yieldable(deferred) max_persisted_id = yield self._stream_id_gen.get_current_token() defer.returnValue((event.internal_metadata.stream_ordering, max_persisted_id)) @@ -1526,7 +1536,7 @@ class EventsStore(SQLBaseStore): if not allow_rejected: rows[:] = [r for r in rows if not r["rejects"]] - res = yield preserve_context_over_deferred(defer.gatherResults( + res = yield make_deferred_yieldable(defer.gatherResults( [ preserve_fn(self._get_event_from_row)( row["internal_metadata"], row["json"], row["redacts"], diff --git a/synapse/util/async.py b/synapse/util/async.py index 0fd5b42523..a0a9039475 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -53,6 +53,11 @@ class ObservableDeferred(object): Cancelling or otherwise resolving an observer will not affect the original ObservableDeferred. + + NB that it does not attempt to do anything with logcontexts; in general + you should probably make_deferred_yieldable the deferreds + returned by `observe`, and ensure that the original deferred runs its + callbacks in the sentinel logcontext. 
""" __slots__ = ["_deferred", "_observers", "_result"] -- cgit 1.5.1 From eaaabc6c4f93d49f090ef7ff930fa2739428a36f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 23 Oct 2017 15:52:32 +0100 Subject: replace 'except:' with 'except Exception:' what could possibly go wrong --- synapse/app/_base.py | 2 +- synapse/appservice/scheduler.py | 2 +- synapse/config/server.py | 2 +- synapse/crypto/context_factory.py | 2 +- synapse/crypto/event_signing.py | 2 +- synapse/crypto/keyring.py | 2 +- synapse/event_auth.py | 4 ++-- synapse/events/spamcheck.py | 2 +- synapse/federation/transport/server.py | 6 +++--- synapse/handlers/federation.py | 16 ++++++++-------- synapse/handlers/initial_sync.py | 2 +- synapse/handlers/message.py | 2 +- synapse/handlers/presence.py | 2 +- synapse/handlers/profile.py | 6 +++--- synapse/handlers/register.py | 2 +- synapse/handlers/room.py | 2 +- synapse/handlers/search.py | 2 +- synapse/http/matrixfederationclient.py | 2 +- synapse/http/server.py | 2 +- synapse/http/servlet.py | 6 +++--- synapse/http/site.py | 2 +- synapse/notifier.py | 2 +- synapse/push/emailpusher.py | 2 +- synapse/push/httppusher.py | 6 +++--- synapse/push/pusher.py | 2 +- synapse/push/pusherpool.py | 6 +++--- synapse/replication/tcp/resource.py | 2 +- synapse/rest/client/v1/directory.py | 2 +- synapse/rest/client/v1/presence.py | 2 +- synapse/rest/client/v1/profile.py | 4 ++-- synapse/rest/client/v1/room.py | 6 +++--- synapse/rest/client/v2_alpha/filter.py | 2 +- synapse/rest/client/v2_alpha/sync.py | 2 +- synapse/rest/client/v2_alpha/user_directory.py | 2 +- synapse/rest/key/v2/remote_key_resource.py | 2 +- synapse/rest/media/v1/_base.py | 2 +- synapse/rest/media/v1/media_repository.py | 2 +- synapse/rest/media/v1/preview_url_resource.py | 6 +++--- synapse/state.py | 2 +- synapse/storage/_base.py | 2 +- synapse/storage/background_updates.py | 2 +- synapse/storage/events.py | 2 +- synapse/storage/prepare_database.py | 2 +- synapse/storage/roommember.py | 2 +- synapse/storage/schema/delta/30/as_users.py | 2 +- synapse/storage/search.py | 4 ++-- synapse/streams/config.py | 6 +++--- synapse/types.py | 8 ++++---- synapse/util/__init__.py | 12 ++++++------ synapse/util/async.py | 6 +++--- synapse/util/logcontext.py | 2 +- synapse/util/retryutils.py | 2 +- tests/storage/test_appservice.py | 2 +- tests/utils.py | 8 ++++---- 54 files changed, 93 insertions(+), 93 deletions(-) (limited to 'synapse/util') diff --git a/synapse/app/_base.py b/synapse/app/_base.py index cf4730730d..9477737759 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -19,7 +19,7 @@ import sys try: import affinity -except: +except Exception: affinity = None from daemonize import Daemonize diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 68a9de17b8..6da315473d 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -123,7 +123,7 @@ class _ServiceQueuer(object): with Measure(self.clock, "servicequeuer.send"): try: yield self.txn_ctrl.send(service, events) - except: + except Exception: logger.exception("AS request failed") finally: self.requests_in_flight.discard(service.id) diff --git a/synapse/config/server.py b/synapse/config/server.py index c9a1715f1f..b66993dab9 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -303,7 +303,7 @@ def read_gc_thresholds(thresholds): return ( int(thresholds[0]), int(thresholds[1]), int(thresholds[2]), ) - except: + except Exception: raise ConfigError( "Value of `gc_threshold` must be a list of 
three integers if set" ) diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index aad4752fe7..cff3ca809a 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -34,7 +34,7 @@ class ServerContextFactory(ssl.ContextFactory): try: _ecCurve = _OpenSSLECCurve(_defaultCurveName) _ecCurve.addECKeyToContext(context) - except: + except Exception: logger.exception("Failed to enable elliptic curve for TLS") context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3) context.use_certificate_chain_file(config.tls_certificate_file) diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py index ec7711ba7d..0d0e7b5286 100644 --- a/synapse/crypto/event_signing.py +++ b/synapse/crypto/event_signing.py @@ -43,7 +43,7 @@ def check_event_content_hash(event, hash_algorithm=hashlib.sha256): message_hash_base64 = event.hashes[name] try: message_hash_bytes = decode_base64(message_hash_base64) - except: + except Exception: raise SynapseError( 400, "Invalid base64: %s" % (message_hash_base64,), diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 054bac456d..35f810b07b 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -759,7 +759,7 @@ def _handle_key_deferred(verify_request): )) try: verify_signed_json(json_object, server_name, verify_key) - except: + except Exception: raise SynapseError( 401, "Invalid signature for server %s with key %s:%s" % ( diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 9e746a28bf..061ee86b16 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -443,12 +443,12 @@ def _check_power_levels(event, auth_events): for k, v in user_list.items(): try: UserID.from_string(k) - except: + except Exception: raise SynapseError(400, "Not a valid user_id: %s" % (k,)) try: int(v) - except: + except Exception: raise SynapseError(400, "Not a valid power level: %s" % (v,)) key = (event.type, event.state_key, ) diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index dccc579eac..633e068eb8 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -22,7 +22,7 @@ class SpamChecker(object): config = None try: module, config = hs.config.spam_checker - except: + except Exception: pass if module is not None: diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index f0778c65c5..1e68a898d5 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -112,7 +112,7 @@ class Authenticator(object): key = strip_quotes(param_dict["key"]) sig = strip_quotes(param_dict["sig"]) return (origin, key, sig) - except: + except Exception: raise AuthenticationError( 400, "Malformed Authorization header", Codes.UNAUTHORIZED ) @@ -177,7 +177,7 @@ class BaseFederationServlet(object): if self.REQUIRE_AUTH: logger.exception("authenticate_request failed") raise - except: + except Exception: logger.exception("authenticate_request failed") raise @@ -270,7 +270,7 @@ class FederationSendServlet(BaseFederationServlet): code, response = yield self.handler.on_incoming_transaction( transaction_data ) - except: + except Exception: logger.exception("on_incoming_transaction failed") raise diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 7711cded01..8b1e606754 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -227,7 +227,7 @@ class FederationHandler(BaseHandler): state, auth_chain = yield 
self.replication_layer.get_state_for_room( origin, pdu.room_id, pdu.event_id, ) - except: + except Exception: logger.exception("Failed to get state for event: %s", pdu.event_id) yield self._process_received_pdu( @@ -461,7 +461,7 @@ class FederationHandler(BaseHandler): def check_match(id): try: return server_name == get_domain_from_id(id) - except: + except Exception: return False # Parses mapping `event_id -> (type, state_key) -> state event_id` @@ -499,7 +499,7 @@ class FederationHandler(BaseHandler): continue try: domain = get_domain_from_id(ev.state_key) - except: + except Exception: continue if domain != server_name: @@ -738,7 +738,7 @@ class FederationHandler(BaseHandler): joined_domains[dom] = min(d, old_d) else: joined_domains[dom] = d - except: + except Exception: pass return sorted(joined_domains.items(), key=lambda d: d[1]) @@ -940,7 +940,7 @@ class FederationHandler(BaseHandler): room_creator_user_id="", is_public=False ) - except: + except Exception: # FIXME pass @@ -1775,7 +1775,7 @@ class FederationHandler(BaseHandler): [e_id for e_id, _ in event.auth_events] ) seen_events = set(have_events.keys()) - except: + except Exception: # FIXME: logger.exception("Failed to get auth chain") @@ -1899,7 +1899,7 @@ class FederationHandler(BaseHandler): except AuthError: pass - except: + except Exception: # FIXME: logger.exception("Failed to query auth chain") @@ -1966,7 +1966,7 @@ class FederationHandler(BaseHandler): def get_next(it, opt=None): try: return it.next() - except: + except Exception: return opt current_local = get_next(local_iter) diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 10f5f35a69..9718d4abc5 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -214,7 +214,7 @@ class InitialSyncHandler(BaseHandler): }) d["account_data"] = account_data_events - except: + except Exception: logger.exception("Failed to get snapshot") yield concurrently_execute(handle_room, room_list, 10) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 28792788d9..21f1717dd2 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -563,7 +563,7 @@ class MessageHandler(BaseHandler): try: dump = ujson.dumps(unfreeze(event.content)) ujson.loads(dump) - except: + except Exception: logger.exception("Failed to encode content: %r", event.content) raise diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index c7c0b0a1e2..fa96ea69cd 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -364,7 +364,7 @@ class PresenceHandler(object): ) preserve_fn(self._update_states)(changes) - except: + except Exception: logger.exception("Exception in _handle_timeouts loop") @defer.inlineCallbacks diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index e56e0a52bf..62b9bd503e 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -118,7 +118,7 @@ class ProfileHandler(BaseHandler): logger.exception("Failed to get displayname") raise - except: + except Exception: logger.exception("Failed to get displayname") else: defer.returnValue(result["displayname"]) @@ -165,7 +165,7 @@ class ProfileHandler(BaseHandler): if e.code != 404: logger.exception("Failed to get avatar_url") raise - except: + except Exception: logger.exception("Failed to get avatar_url") defer.returnValue(result["avatar_url"]) @@ -266,7 +266,7 @@ class ProfileHandler(BaseHandler): }, ignore_backoff=True, ) - except: + except Exception: 
logger.exception("Failed to get avatar_url") yield self.store.update_remote_profile_cache( diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 52aa9964d9..49dc33c147 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -289,7 +289,7 @@ class RegistrationHandler(BaseHandler): try: identity_handler = self.hs.get_handlers().identity_handler threepid = yield identity_handler.threepid_from_creds(c) - except: + except Exception: logger.exception("Couldn't validate 3pid") raise RegistrationError(400, "Couldn't validate 3pid") diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index e945bd35bc..496f1fc39b 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -108,7 +108,7 @@ class RoomCreationHandler(BaseHandler): for i in invite_list: try: UserID.from_string(i) - except: + except Exception: raise SynapseError(400, "Invalid user_id: %s" % (i,)) invite_3pid_list = config.get("invite_3pid", []) diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index df75d70fac..9772ed1a0e 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -61,7 +61,7 @@ class SearchHandler(BaseHandler): assert batch_group is not None assert batch_group_key is not None assert batch_token is not None - except: + except Exception: raise SynapseError(400, "Invalid batch") try: diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 8c8b7fa656..833496b72d 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -550,7 +550,7 @@ class MatrixFederationHttpClient(object): length = yield _readBodyToFile( response, output_stream, max_size ) - except: + except Exception: logger.exception("Failed to download body") raise diff --git a/synapse/http/server.py b/synapse/http/server.py index 8a27e3b422..3ca1c9947c 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -130,7 +130,7 @@ def wrap_request_handler(request_handler, include_metrics=False): pretty_print=_request_user_agent_is_curl(request), version_string=self.version_string, ) - except: + except Exception: logger.exception( "Failed handle request %s.%s on %r: %r", request_handler.__module__, diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 9a4c36ad5d..8118ee7cc2 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -48,7 +48,7 @@ def parse_integer_from_args(args, name, default=None, required=False): if name in args: try: return int(args[name][0]) - except: + except Exception: message = "Query parameter %r must be an integer" % (name,) raise SynapseError(400, message) else: @@ -88,7 +88,7 @@ def parse_boolean_from_args(args, name, default=None, required=False): "true": True, "false": False, }[args[name][0]] - except: + except Exception: message = ( "Boolean query parameter %r must be one of" " ['true', 'false']" @@ -162,7 +162,7 @@ def parse_json_value_from_request(request): """ try: content_bytes = request.content.read() - except: + except Exception: raise SynapseError(400, "Error reading JSON content.") try: diff --git a/synapse/http/site.py b/synapse/http/site.py index 4b09d7ee66..cd1492b1c3 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -67,7 +67,7 @@ class SynapseRequest(Request): ru_utime, ru_stime = context.get_resource_usage() db_txn_count = context.db_txn_count db_txn_duration = context.db_txn_duration - except: + except Exception: ru_utime, ru_stime = (0, 0) db_txn_count, db_txn_duration = (0, 0) diff 
--git a/synapse/notifier.py b/synapse/notifier.py index 385208b574..626da778cd 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -289,7 +289,7 @@ class Notifier(object): for user_stream in user_streams: try: user_stream.notify(stream_key, new_token, time_now_ms) - except: + except Exception: logger.exception("Failed to notify listener") self.notify_replication() diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index a69dda7b09..58df98a793 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -121,7 +121,7 @@ class EmailPusher(object): starting_max_ordering = self.max_stream_ordering try: yield self._unsafe_process() - except: + except Exception: logger.exception("Exception processing notifs") if self.max_stream_ordering == starting_max_ordering: break diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 62c41cd9db..74c0bc462c 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -131,7 +131,7 @@ class HttpPusher(object): starting_max_ordering = self.max_stream_ordering try: yield self._unsafe_process() - except: + except Exception: logger.exception("Exception processing notifs") if self.max_stream_ordering == starting_max_ordering: break @@ -314,7 +314,7 @@ class HttpPusher(object): defer.returnValue([]) try: resp = yield self.http_client.post_json_get_json(self.url, notification_dict) - except: + except Exception: logger.warn("Failed to push %s ", self.url) defer.returnValue(False) rejected = [] @@ -345,7 +345,7 @@ class HttpPusher(object): } try: resp = yield self.http_client.post_json_get_json(self.url, d) - except: + except Exception: logger.exception("Failed to push %s ", self.url) defer.returnValue(False) rejected = [] diff --git a/synapse/push/pusher.py b/synapse/push/pusher.py index 491f27bded..71576330a9 100644 --- a/synapse/push/pusher.py +++ b/synapse/push/pusher.py @@ -27,7 +27,7 @@ logger = logging.getLogger(__name__) try: from synapse.push.emailpusher import EmailPusher from synapse.push.mailer import Mailer, load_jinja2_templates -except: +except Exception: pass diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 43cb6e9c01..7c069b662e 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -137,7 +137,7 @@ class PusherPool: ) yield preserve_context_over_deferred(defer.gatherResults(deferreds)) - except: + except Exception: logger.exception("Exception in pusher on_new_notifications") @defer.inlineCallbacks @@ -162,7 +162,7 @@ class PusherPool: ) yield preserve_context_over_deferred(defer.gatherResults(deferreds)) - except: + except Exception: logger.exception("Exception in pusher on_new_receipts") @defer.inlineCallbacks @@ -188,7 +188,7 @@ class PusherPool: for pusherdict in pushers: try: p = self.pusher_factory.create_pusher(pusherdict) - except: + except Exception: logger.exception("Couldn't start a pusher: caught Exception") continue if p: diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 6c1beca4e3..1d03e79b85 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -162,7 +162,7 @@ class ReplicationStreamer(object): ) try: updates, current_token = yield stream.get_updates() - except: + except Exception: logger.info("Failed to handle stream %s", stream.NAME) raise diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index f15aa5c13f..1c3933380f 100644 --- a/synapse/rest/client/v1/directory.py +++ 
b/synapse/rest/client/v1/directory.py @@ -93,7 +93,7 @@ class ClientDirectoryServer(ClientV1RestServlet): ) except SynapseError as e: raise e - except: + except Exception: logger.exception("Failed to create association") raise except AuthError: diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index 47b2dc45e7..4a73813c58 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -78,7 +78,7 @@ class PresenceStatusRestServlet(ClientV1RestServlet): raise KeyError() except SynapseError as e: raise e - except: + except Exception: raise SynapseError(400, "Unable to parse state") yield self.presence_handler.set_state(user, state) diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index d7edc34245..e4e3611a14 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -52,7 +52,7 @@ class ProfileDisplaynameRestServlet(ClientV1RestServlet): try: new_name = content["displayname"] - except: + except Exception: defer.returnValue((400, "Unable to parse name")) yield self.profile_handler.set_displayname( @@ -94,7 +94,7 @@ class ProfileAvatarURLRestServlet(ClientV1RestServlet): content = parse_json_object_from_request(request) try: new_name = content["avatar_url"] - except: + except Exception: defer.returnValue((400, "Unable to parse name")) yield self.profile_handler.set_avatar_url( diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 6c379d53ac..75b735b47d 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -238,7 +238,7 @@ class JoinRoomAliasServlet(ClientV1RestServlet): try: content = parse_json_object_from_request(request) - except: + except Exception: # Turns out we used to ignore the body entirely, and some clients # cheekily send invalid bodies. content = {} @@ -247,7 +247,7 @@ class JoinRoomAliasServlet(ClientV1RestServlet): room_id = room_identifier try: remote_room_hosts = request.args["server_name"] - except: + except Exception: remote_room_hosts = None elif RoomAlias.is_valid(room_identifier): handler = self.handlers.room_member_handler @@ -587,7 +587,7 @@ class RoomMembershipRestServlet(ClientV1RestServlet): try: content = parse_json_object_from_request(request) - except: + except Exception: # Turns out we used to ignore the body entirely, and some clients # cheekily send invalid bodies. 
content = {} diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py index d2b2fd66e6..1b9dc4528d 100644 --- a/synapse/rest/client/v2_alpha/filter.py +++ b/synapse/rest/client/v2_alpha/filter.py @@ -50,7 +50,7 @@ class GetFilterRestServlet(RestServlet): try: filter_id = int(filter_id) - except: + except Exception: raise SynapseError(400, "Invalid filter_id") try: diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index a1e0e53b33..a0a8e4b8e4 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -125,7 +125,7 @@ class SyncRestServlet(RestServlet): filter_object = json.loads(filter_id) set_timeline_upper_limit(filter_object, self.hs.config.filter_timeline_limit) - except: + except Exception: raise SynapseError(400, "Invalid filter JSON") self.filtering.check_valid_filter(filter_object) filter = FilterCollection(filter_object) diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py index 6e012da4aa..2d4a43c353 100644 --- a/synapse/rest/client/v2_alpha/user_directory.py +++ b/synapse/rest/client/v2_alpha/user_directory.py @@ -65,7 +65,7 @@ class UserDirectorySearchRestServlet(RestServlet): try: search_term = body["search_term"] - except: + except Exception: raise SynapseError(400, "`search_term` is required field") results = yield self.user_directory_handler.search_users( diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 9fe2013657..cc2842aa72 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -213,7 +213,7 @@ class RemoteKey(Resource): ) except KeyLookupError as e: logger.info("Failed to fetch key: %s", e) - except: + except Exception: logger.exception("Failed to get key for %r", server_name) yield self.query_keys( request, query, query_remote_on_cache_miss=False diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index bce077becf..95fa95fce3 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -45,7 +45,7 @@ def parse_media_id(request): except UnicodeDecodeError: pass return server_name, media_id, file_name - except: + except Exception: raise SynapseError( 404, "Invalid media id token %r" % (request.postpath,), diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 6b50b45b1f..eed9056a2f 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -310,7 +310,7 @@ class MediaRepository(object): media_length=length, filesystem_id=file_id, ) - except: + except Exception: os.remove(fname) raise diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 2a3e37fdf4..80114fca0d 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -367,7 +367,7 @@ class PreviewUrlResource(Resource): dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id) for dir in dirs: os.rmdir(dir) - except: + except Exception: pass yield self.store.delete_url_cache(removed_media) @@ -397,7 +397,7 @@ class PreviewUrlResource(Resource): dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id) for dir in dirs: os.rmdir(dir) - except: + except Exception: pass thumbnail_dir = self.filepaths.url_cache_thumbnail_directory(media_id) @@ -415,7 +415,7 @@ class 
PreviewUrlResource(Resource): dirs = self.filepaths.url_cache_thumbnail_dirs_to_delete(media_id) for dir in dirs: os.rmdir(dir) - except: + except Exception: pass yield self.store.delete_url_cache_media(removed_media) diff --git a/synapse/state.py b/synapse/state.py index dcdcdef65e..9e624b4937 100644 --- a/synapse/state.py +++ b/synapse/state.py @@ -560,7 +560,7 @@ def _resolve_with_state(unconflicted_state_ids, conflicted_state_ds, auth_event_ resolved_state = _resolve_state_events( conflicted_state, auth_events ) - except: + except Exception: logger.exception("Failed to resolve state") raise diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 5124a833a5..6caf7b3356 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -103,7 +103,7 @@ class LoggingTransaction(object): "[SQL values] {%s} %r", self.name, args[0] ) - except: + except Exception: # Don't let logging failures stop SQL from working pass diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 7157fb1dfb..a6e6f52a6a 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -98,7 +98,7 @@ class BackgroundUpdateStore(SQLBaseStore): result = yield self.do_next_background_update( self.BACKGROUND_UPDATE_DURATION_MS ) - except: + except Exception: logger.exception("Error doing update") else: if result is None: diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 637640ec2a..4298d8baf1 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1481,7 +1481,7 @@ class EventsStore(SQLBaseStore): for i in ids if i in res ]) - except: + except Exception: logger.exception("Failed to callback") with PreserveLoggingContext(): reactor.callFromThread(fire, event_list, row_dict) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index ccaaabcfa0..817c2185c8 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -66,7 +66,7 @@ def prepare_database(db_conn, database_engine, config): cur.close() db_conn.commit() - except: + except Exception: db_conn.rollback() raise diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index a0fc9a6867..3fa8019eb7 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -636,7 +636,7 @@ class RoomMemberStore(SQLBaseStore): room_id = row["room_id"] try: content = json.loads(row["content"]) - except: + except Exception: continue display_name = content.get("displayname", None) diff --git a/synapse/storage/schema/delta/30/as_users.py b/synapse/storage/schema/delta/30/as_users.py index 5b7d8d1ab5..c53e53c94f 100644 --- a/synapse/storage/schema/delta/30/as_users.py +++ b/synapse/storage/schema/delta/30/as_users.py @@ -22,7 +22,7 @@ def run_create(cur, database_engine, *args, **kwargs): # NULL indicates user was not registered by an appservice. try: cur.execute("ALTER TABLE users ADD COLUMN appservice_id TEXT") - except: + except Exception: # Maybe we already added the column? Hope so... 
pass diff --git a/synapse/storage/search.py b/synapse/storage/search.py index 8f2b3c4435..05d4ef586e 100644 --- a/synapse/storage/search.py +++ b/synapse/storage/search.py @@ -81,7 +81,7 @@ class SearchStore(BackgroundUpdateStore): etype = row["type"] try: content = json.loads(row["content"]) - except: + except Exception: continue if etype == "m.room.message": @@ -407,7 +407,7 @@ class SearchStore(BackgroundUpdateStore): origin_server_ts, stream = pagination_token.split(",") origin_server_ts = int(origin_server_ts) stream = int(stream) - except: + except Exception: raise SynapseError(400, "Invalid pagination token") clauses.append( diff --git a/synapse/streams/config.py b/synapse/streams/config.py index 4f089bfb94..ca78e551cb 100644 --- a/synapse/streams/config.py +++ b/synapse/streams/config.py @@ -80,13 +80,13 @@ class PaginationConfig(object): from_tok = None # For backwards compat. elif from_tok: from_tok = StreamToken.from_string(from_tok) - except: + except Exception: raise SynapseError(400, "'from' paramater is invalid") try: if to_tok: to_tok = StreamToken.from_string(to_tok) - except: + except Exception: raise SynapseError(400, "'to' paramater is invalid") limit = get_param("limit", None) @@ -98,7 +98,7 @@ class PaginationConfig(object): try: return PaginationConfig(from_tok, to_tok, direction, limit) - except: + except Exception: logger.exception("Failed to create pagination config") raise SynapseError(400, "Invalid request.") diff --git a/synapse/types.py b/synapse/types.py index 1eeda0b72f..6e76c016d9 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -127,7 +127,7 @@ class DomainSpecificString( try: cls.from_string(s) return True - except: + except Exception: return False __str__ = to_string @@ -213,7 +213,7 @@ class StreamToken( # i.e. 
old token from before receipt_key keys.append("0") return cls(*keys) - except: + except Exception: raise SynapseError(400, "Invalid Token") def to_string(self): @@ -299,7 +299,7 @@ class RoomStreamToken(namedtuple("_StreamToken", "topological stream")): if string[0] == 't': parts = string[1:].split('-', 1) return cls(topological=int(parts[0]), stream=int(parts[1])) - except: + except Exception: pass raise SynapseError(400, "Invalid token %r" % (string,)) @@ -308,7 +308,7 @@ class RoomStreamToken(namedtuple("_StreamToken", "topological stream")): try: if string[0] == 's': return cls(topological=None, stream=int(string[1:])) - except: + except Exception: pass raise SynapseError(400, "Invalid token %r" % (string,)) diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 2a2360ab5d..cd1ce62bdb 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -82,7 +82,7 @@ class Clock(object): def cancel_call_later(self, timer, ignore_errs=False): try: timer.cancel() - except: + except Exception: if not ignore_errs: raise @@ -97,12 +97,12 @@ class Clock(object): try: ret_deferred.errback(e) - except: + except Exception: pass try: given_deferred.cancel() - except: + except Exception: pass timer = None @@ -110,7 +110,7 @@ class Clock(object): def cancel(res): try: self.cancel_call_later(timer) - except: + except Exception: pass return res @@ -119,7 +119,7 @@ class Clock(object): def success(res): try: ret_deferred.callback(res) - except: + except Exception: pass return res @@ -127,7 +127,7 @@ class Clock(object): def err(res): try: ret_deferred.errback(res) - except: + except Exception: pass given_deferred.addCallbacks(callback=success, errback=err) diff --git a/synapse/util/async.py b/synapse/util/async.py index a0a9039475..1a884e96ee 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -73,7 +73,7 @@ class ObservableDeferred(object): try: # TODO: Handle errors here. self._observers.pop().callback(r) - except: + except Exception: pass return r @@ -83,7 +83,7 @@ class ObservableDeferred(object): try: # TODO: Handle errors here. self._observers.pop().errback(f) - except: + except Exception: pass if consumeErrors: @@ -205,7 +205,7 @@ class Linearizer(object): try: with PreserveLoggingContext(): yield current_defer - except: + except Exception: logger.exception("Unexpected exception in Linearizer") logger.info("Acquired linearizer lock %r for key %r", self.name, diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 990216145e..9683cc7265 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -42,7 +42,7 @@ try: def get_thread_resource_usage(): return resource.getrusage(RUSAGE_THREAD) -except: +except Exception: # If the system doesn't support resource.getrusage(RUSAGE_THREAD) then we # won't track resource usage by returning None. 
def get_thread_resource_usage(): diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 4fa9d1a03c..1adedbb361 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -189,7 +189,7 @@ class RetryDestinationLimiter(object): yield self.store.set_destination_retry_timings( self.destination, retry_last_ts, self.retry_interval ) - except: + except Exception: logger.exception( "Failed to store set_destination_retry_timings", ) diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index 9e98d0e330..79f569e787 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -65,7 +65,7 @@ class ApplicationServiceStoreTestCase(unittest.TestCase): for f in self.as_yaml_files: try: os.remove(f) - except: + except Exception: pass def _add_appservice(self, as_token, id, url, hs_token, sender): diff --git a/tests/utils.py b/tests/utils.py index 3c81a3e16d..d2ebce4b2e 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -184,7 +184,7 @@ class MockHttpResource(HttpServer): mock_request.args = urlparse.parse_qs(path.split('?')[1]) mock_request.path = path.split('?')[0] path = mock_request.path - except: + except Exception: pass for (method, pattern, func) in self.callbacks: @@ -364,13 +364,13 @@ class MemoryDataStore(object): return { "name": self.tokens_to_users[token], } - except: + except Exception: raise StoreError(400, "User does not exist.") def get_room(self, room_id): try: return self.rooms[room_id] - except: + except Exception: return None def store_room(self, room_id, room_creator_user_id, is_public): @@ -499,7 +499,7 @@ class DeferredMockCallable(object): for _, _, d in self.expectations: try: d.errback(failure) - except: + except Exception: pass raise failure -- cgit 1.5.1 From 0be99858f34165258af5d2865818f5baee35118b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 23 Oct 2017 15:56:38 +0100 Subject: fix vars named `l` MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit E741 says "do not use variables named ‘l’, ‘O’, or ‘I’". --- synapse/util/__init__.py | 6 +++--- synapse/util/wheel_timer.py | 5 +---- 2 files changed, 4 insertions(+), 7 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index cd1ce62bdb..756d8ffa32 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -59,9 +59,9 @@ class Clock(object): f(function): The function to call repeatedly. msec(float): How long to wait between calls in milliseconds. """ - l = task.LoopingCall(f) - l.start(msec / 1000.0, now=False) - return l + call = task.LoopingCall(f) + call.start(msec / 1000.0, now=False) + return call def call_later(self, delay, callback, *args, **kwargs): """Call something later diff --git a/synapse/util/wheel_timer.py b/synapse/util/wheel_timer.py index 7412fc57a4..b70f9a6b0a 100644 --- a/synapse/util/wheel_timer.py +++ b/synapse/util/wheel_timer.py @@ -91,7 +91,4 @@ class WheelTimer(object): return ret def __len__(self): - l = 0 - for entry in self.entries: - l += len(entry.queue) - return l + return sum(len(entry.queue) for entry in self.entries) -- cgit 1.5.1 From bf993db11cac53cd4e77f2f7df07a5d8987b105e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 7 Nov 2017 00:48:57 +0000 Subject: Logging and logcontext fixes for Limiter Add some logging to the Limiter in a similar spirit to the Linearizer, to help debug issues. Also fix a logcontext leak. Also refactor slightly to avoid throwing exceptions. 
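For context before the diff: the Limiter follows the same queue() idiom as the Linearizer above, but admits up to max_count concurrent holders per key rather than one. A hedged usage sketch (fetch_remote is a hypothetical coroutine, not part of the patch):

    from twisted.internet import defer

    limiter = Limiter(max_count=3)

    @defer.inlineCallbacks
    def limited_fetch(destination):
        # At most three concurrent fetches per destination key; a fourth
        # caller parks on a Deferred until one of the slots frees up.
        with (yield limiter.queue(destination)):
            result = yield fetch_remote(destination)  # hypothetical
        defer.returnValue(result)

The patch below makes the wait observable (both the contended and uncontended acquisitions are logged) and fires the next waiter's deferred inside PreserveLoggingContext, so the releaser's logcontext does not leak into the waiter when it resumes.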
--- synapse/util/async.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/async.py b/synapse/util/async.py index 1a884e96ee..e786fb38a9 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -278,8 +278,13 @@ class Limiter(object): if entry[0] >= self.max_count: new_defer = defer.Deferred() entry[1].append(new_defer) + + logger.info("Waiting to acquire limiter lock for key %r", key) with PreserveLoggingContext(): yield new_defer + logger.info("Acquired limiter lock for key %r", key) + else: + logger.info("Acquired uncontended limiter lock for key %r", key) entry[0] += 1 @@ -288,16 +293,21 @@ class Limiter(object): try: yield finally: + logger.info("Releasing limiter lock for key %r", key) + # We've finished executing so check if there are any things # blocked waiting to execute and start one of them entry[0] -= 1 - try: - entry[1].pop(0).callback(None) - except IndexError: - # If nothing else is executing for this key then remove it - # from the map - if entry[0] == 0: - self.key_to_defer.pop(key, None) + + if entry[1]: + next_def = entry[1].pop(0) + + with PreserveLoggingContext(): + next_def.callback(None) + elif entry[0] == 0: + # We were the last thing for this key: remove it from the + # map. + del self.key_to_defer[key] defer.returnValue(_ctx_manager()) -- cgit 1.5.1 From 7e6fa29cb5ba1abd8b4f3873b0ef171c7c8aba26 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 14 Nov 2017 11:22:42 +0000 Subject: Remove preserve_context_over_{fn, deferred} Both of these functions ae known to leak logcontexts. Replace the remaining calls to them and kill them off. --- docs/log_contexts.rst | 4 ---- synapse/federation/federation_client.py | 4 ++-- synapse/handlers/appservice.py | 4 ++-- synapse/handlers/initial_sync.py | 4 ++-- synapse/push/pusherpool.py | 6 +++--- synapse/storage/stream.py | 4 ++-- synapse/util/async.py | 6 +++--- synapse/util/distributor.py | 22 +++++++--------------- synapse/util/logcontext.py | 31 ------------------------------- synapse/visibility.py | 4 ++-- 10 files changed, 23 insertions(+), 66 deletions(-) (limited to 'synapse/util') diff --git a/docs/log_contexts.rst b/docs/log_contexts.rst index eb1784e700..b19b7fa1ea 100644 --- a/docs/log_contexts.rst +++ b/docs/log_contexts.rst @@ -298,10 +298,6 @@ It can be used like this: # this will now be logged against the request context logger.debug("Request handling complete") -XXX: I think ``preserve_context_over_fn`` is supposed to do the first option, -but the fact that it does ``preserve_context_over_deferred`` on its results -means that its use is fraught with difficulty. 
- Passing synapse deferreds into third-party functions ---------------------------------------------------- diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 7c5e5d957f..b8f02f5391 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -25,7 +25,7 @@ from synapse.api.errors import ( from synapse.util import unwrapFirstError, logcontext from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.logutils import log_function -from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred +from synapse.util.logcontext import make_deferred_yieldable, preserve_fn from synapse.events import FrozenEvent, builder import synapse.metrics @@ -420,7 +420,7 @@ class FederationClient(FederationBase): for e_id in batch ] - res = yield preserve_context_over_deferred( + res = yield make_deferred_yieldable( defer.DeferredList(deferreds, consumeErrors=True) ) for success, result in res: diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 543bf28aec..feca3e4c10 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -17,7 +17,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes from synapse.util.metrics import Measure -from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred +from synapse.util.logcontext import make_deferred_yieldable, preserve_fn import logging @@ -159,7 +159,7 @@ class ApplicationServicesHandler(object): def query_3pe(self, kind, protocol, fields): services = yield self._get_services_for_3pn(protocol) - results = yield preserve_context_over_deferred(defer.DeferredList([ + results = yield make_deferred_yieldable(defer.DeferredList([ preserve_fn(self.appservice_api.query_3pe)(service, kind, protocol, fields) for service in services ], consumeErrors=True)) diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 9718d4abc5..c5267b4b84 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -27,7 +27,7 @@ from synapse.types import ( from synapse.util import unwrapFirstError from synapse.util.async import concurrently_execute from synapse.util.caches.snapshot_cache import SnapshotCache -from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred +from synapse.util.logcontext import make_deferred_yieldable, preserve_fn from synapse.visibility import filter_events_for_client from ._base import BaseHandler @@ -163,7 +163,7 @@ class InitialSyncHandler(BaseHandler): lambda states: states[event.event_id] ) - (messages, token), current_state = yield preserve_context_over_deferred( + (messages, token), current_state = yield make_deferred_yieldable( defer.gatherResults( [ preserve_fn(self.store.get_recent_events_for_room)( diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 7c069b662e..34cb108dcb 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -17,7 +17,7 @@ from twisted.internet import defer from .pusher import PusherFactory -from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred +from synapse.util.logcontext import make_deferred_yieldable, preserve_fn from synapse.util.async import run_on_reactor import logging @@ -136,7 +136,7 @@ class PusherPool: ) ) - yield preserve_context_over_deferred(defer.gatherResults(deferreds)) + yield make_deferred_yieldable(defer.gatherResults(deferreds)) except Exception: 
logger.exception("Exception in pusher on_new_notifications") @@ -161,7 +161,7 @@ class PusherPool: preserve_fn(p.on_new_receipts)(min_stream_id, max_stream_id) ) - yield preserve_context_over_deferred(defer.gatherResults(deferreds)) + yield make_deferred_yieldable(defer.gatherResults(deferreds)) except Exception: logger.exception("Exception in pusher on_new_receipts") diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index dddd5fc0e7..52bdce5be2 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -39,7 +39,7 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cached from synapse.api.constants import EventTypes from synapse.types import RoomStreamToken -from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred +from synapse.util.logcontext import make_deferred_yieldable, preserve_fn from synapse.storage.engines import PostgresEngine, Sqlite3Engine import logging @@ -234,7 +234,7 @@ class StreamStore(SQLBaseStore): results = {} room_ids = list(room_ids) for rm_ids in (room_ids[i:i + 20] for i in xrange(0, len(room_ids), 20)): - res = yield preserve_context_over_deferred(defer.gatherResults([ + res = yield make_deferred_yieldable(defer.gatherResults([ preserve_fn(self.get_room_events_stream_for_room)( room_id, from_key, to_key, limit, order=order, ) diff --git a/synapse/util/async.py b/synapse/util/async.py index e786fb38a9..0729bb2863 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -17,7 +17,7 @@ from twisted.internet import defer, reactor from .logcontext import ( - PreserveLoggingContext, preserve_fn, preserve_context_over_deferred, + PreserveLoggingContext, make_deferred_yieldable, preserve_fn ) from synapse.util import logcontext, unwrapFirstError @@ -351,7 +351,7 @@ class ReadWriteLock(object): # We wait for the latest writer to finish writing. We can safely ignore # any existing readers... as they're readers. - yield curr_writer + yield make_deferred_yieldable(curr_writer) @contextmanager def _ctx_manager(): @@ -380,7 +380,7 @@ class ReadWriteLock(object): curr_readers.clear() self.key_to_current_writer[key] = new_defer - yield preserve_context_over_deferred(defer.gatherResults(to_wait_on)) + yield make_deferred_yieldable(defer.gatherResults(to_wait_on)) @contextmanager def _ctx_manager(): diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py index e68f94ce77..734331caaa 100644 --- a/synapse/util/distributor.py +++ b/synapse/util/distributor.py @@ -13,32 +13,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from twisted.internet import defer +import logging -from synapse.util.logcontext import ( - PreserveLoggingContext, preserve_context_over_fn -) +from twisted.internet import defer from synapse.util import unwrapFirstError - -import logging - +from synapse.util.logcontext import PreserveLoggingContext logger = logging.getLogger(__name__) def user_left_room(distributor, user, room_id): - return preserve_context_over_fn( - distributor.fire, - "user_left_room", user=user, room_id=room_id - ) + with PreserveLoggingContext(): + distributor.fire("user_left_room", user=user, room_id=room_id) def user_joined_room(distributor, user, room_id): - return preserve_context_over_fn( - distributor.fire, - "user_joined_room", user=user, room_id=room_id - ) + with PreserveLoggingContext(): + distributor.fire("user_joined_room", user=user, room_id=room_id) class Distributor(object): diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 9683cc7265..92b9413a35 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -291,37 +291,6 @@ class _PreservingContextDeferred(defer.Deferred): return g -def preserve_context_over_fn(fn, *args, **kwargs): - """Takes a function and invokes it with the given arguments, but removes - and restores the current logging context while doing so. - - If the result is a deferred, call preserve_context_over_deferred before - returning it. - """ - with PreserveLoggingContext(): - res = fn(*args, **kwargs) - - if isinstance(res, defer.Deferred): - return preserve_context_over_deferred(res) - else: - return res - - -def preserve_context_over_deferred(deferred, context=None): - """Given a deferred wrap it such that any callbacks added later to it will - be invoked with the current context. - - Deprecated: this almost certainly doesn't do want you want, ie make - the deferred follow the synapse logcontext rules: try - ``make_deferred_yieldable`` instead. 
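# Editorial note (sketch): the replacement idiom used in distributor.py
# above. Rather than routing through the deprecated helper, the caller
# drops to the sentinel context explicitly around the fire-and-forget
# call:
def user_joined_room(distributor, user, room_id):
    with PreserveLoggingContext():
        distributor.fire("user_joined_room", user=user, room_id=room_id)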
- """ - if context is None: - context = LoggingContext.current_context() - d = _PreservingContextDeferred(context) - deferred.chainDeferred(d) - return d - - def preserve_fn(f): """Wraps a function, to ensure that the current context is restored after return from the function, and that the sentinel context is set once the diff --git a/synapse/visibility.py b/synapse/visibility.py index d7dbdc77ff..aaca2c584c 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -17,7 +17,7 @@ from twisted.internet import defer from synapse.api.constants import Membership, EventTypes -from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred +from synapse.util.logcontext import make_deferred_yieldable, preserve_fn import logging @@ -58,7 +58,7 @@ def filter_events_for_clients(store, user_tuples, events, event_id_to_state, always_include_ids (set(event_id)): set of event ids to specifically include (unless sender is ignored) """ - forgotten = yield preserve_context_over_deferred(defer.gatherResults([ + forgotten = yield make_deferred_yieldable(defer.gatherResults([ defer.maybeDeferred( preserve_fn(store.who_forgot_in_room), room_id, -- cgit 1.5.1 From b2cd6accf5e584cbaf7f3c6b44addb06a883a222 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 14 Nov 2017 23:00:10 +0000 Subject: Remove __PreservingContextDeferred too --- synapse/util/logcontext.py | 30 ------------------------------ 1 file changed, 30 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 92b9413a35..48c9f6802d 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -261,36 +261,6 @@ class PreserveLoggingContext(object): ) -class _PreservingContextDeferred(defer.Deferred): - """A deferred that ensures that all callbacks and errbacks are called with - the given logging context. - """ - def __init__(self, context): - self._log_context = context - defer.Deferred.__init__(self) - - def addCallbacks(self, callback, errback=None, - callbackArgs=None, callbackKeywords=None, - errbackArgs=None, errbackKeywords=None): - callback = self._wrap_callback(callback) - errback = self._wrap_callback(errback) - return defer.Deferred.addCallbacks( - self, callback, - errback=errback, - callbackArgs=callbackArgs, - callbackKeywords=callbackKeywords, - errbackArgs=errbackArgs, - errbackKeywords=errbackKeywords, - ) - - def _wrap_callback(self, f): - def g(res, *args, **kwargs): - with PreserveLoggingContext(self._log_context): - res = f(res, *args, **kwargs) - return res - return g - - def preserve_fn(f): """Wraps a function, to ensure that the current context is restored after return from the function, and that the sentinel context is set once the -- cgit 1.5.1 From 39f4e29d0151b56a3c8528e3149cd5765b9f600d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 15 Jan 2018 17:00:12 +0000 Subject: Reorganise request and block metrics In order to circumvent the number of duplicate foo:count metrics increasing without bounds, it's time for a rearrangement. 
The following are all deprecated, and replaced with synapse_util_metrics_block_count: synapse_util_metrics_block_timer:count synapse_util_metrics_block_ru_utime:count synapse_util_metrics_block_ru_stime:count synapse_util_metrics_block_db_txn_count:count synapse_util_metrics_block_db_txn_duration:count The following are all deprecated, and replaced with synapse_http_server_response_count: synapse_http_server_requests synapse_http_server_response_time:count synapse_http_server_response_ru_utime:count synapse_http_server_response_ru_stime:count synapse_http_server_response_db_txn_count:count synapse_http_server_response_db_txn_duration:count The following are renamed (the old metrics are kept for now, but deprecated): synapse_util_metrics_block_timer:total -> synapse_util_metrics_block_time_seconds synapse_util_metrics_block_ru_utime:total -> synapse_util_metrics_block_ru_utime_seconds synapse_util_metrics_block_ru_stime:total -> synapse_util_metrics_block_ru_stime_seconds synapse_util_metrics_block_db_txn_count:total -> synapse_util_metrics_block_db_txn_count synapse_util_metrics_block_db_txn_duration:total -> synapse_util_metrics_block_db_txn_duration_seconds synapse_http_server_response_time:total -> synapse_http_server_response_time_seconds synapse_http_server_response_ru_utime:total -> synapse_http_server_response_ru_utime_seconds synapse_http_server_response_ru_stime:total -> synapse_http_server_response_ru_stime_seconds synapse_http_server_response_db_txn_count:total -> synapse_http_server_response_db_txn_count synapse_http_server_response_db_txn_duration:total synapse_http_server_response_db_txn_duration_seconds --- synapse/http/server.py | 56 ++++++++++++++++++++++++++++++++++++------------- synapse/util/metrics.py | 53 ++++++++++++++++++++++++++++++++++++---------- 2 files changed, 84 insertions(+), 25 deletions(-) (limited to 'synapse/util') diff --git a/synapse/http/server.py b/synapse/http/server.py index 6e8f4c9c5f..269b65ca41 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -42,34 +42,62 @@ logger = logging.getLogger(__name__) metrics = synapse.metrics.get_metrics_for(__name__) -incoming_requests_counter = metrics.register_counter( - "requests", +# total number of responses served, split by method/servlet/tag +response_count = metrics.register_counter( + "response_count", labels=["method", "servlet", "tag"], + alternative_names=( + # the following are all deprecated aliases for the same metric + metrics.name_prefix + x for x in ( + "_requests", + "_response_time:count", + "_response_ru_utime:count", + "_response_ru_stime:count", + "_response_db_txn_count:count", + "_response_db_txn_duration:count", + ) + ) ) + outgoing_responses_counter = metrics.register_counter( "responses", labels=["method", "code"], ) -response_timer = metrics.register_distribution( - "response_time", - labels=["method", "servlet", "tag"] +response_timer = metrics.register_counter( + "response_time_seconds", + labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_time:total", + ), ) -response_ru_utime = metrics.register_distribution( - "response_ru_utime", labels=["method", "servlet", "tag"] +response_ru_utime = metrics.register_counter( + "response_ru_utime_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_ru_utime:total", + ), ) -response_ru_stime = metrics.register_distribution( - "response_ru_stime", labels=["method", "servlet", "tag"] +response_ru_stime = metrics.register_counter( + 
"response_ru_stime_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_ru_stime:total", + ), ) -response_db_txn_count = metrics.register_distribution( - "response_db_txn_count", labels=["method", "servlet", "tag"] +response_db_txn_count = metrics.register_counter( + "response_db_txn_count", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_db_txn_count:total", + ), ) -response_db_txn_duration = metrics.register_distribution( - "response_db_txn_duration", labels=["method", "servlet", "tag"] +response_db_txn_duration = metrics.register_counter( + "response_db_txn_duration_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_db_txn_duration:total", + ), ) @@ -330,7 +358,7 @@ class RequestMetrics(object): ) return - incoming_requests_counter.inc(request.method, self.name, tag) + response_count.inc(request.method, self.name, tag) response_timer.inc_by( clock.time_msec() - self.start, request.method, diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 4ea930d3e8..8d22ff3068 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -27,25 +27,56 @@ logger = logging.getLogger(__name__) metrics = synapse.metrics.get_metrics_for(__name__) -block_timer = metrics.register_distribution( - "block_timer", - labels=["block_name"] +# total number of times we have hit this block +response_count = metrics.register_counter( + "block_count", + labels=["block_name"], + alternative_names=( + # the following are all deprecated aliases for the same metric + metrics.name_prefix + x for x in ( + "_block_timer:count", + "_block_ru_utime:count", + "_block_ru_stime:count", + "_block_db_txn_count:count", + "_block_db_txn_duration:count", + ) + ) +) + +block_timer = metrics.register_counter( + "block_time_seconds", + labels=["block_name"], + alternative_names=( + metrics.name_prefix + "_block_timer:total", + ), ) -block_ru_utime = metrics.register_distribution( - "block_ru_utime", labels=["block_name"] +block_ru_utime = metrics.register_counter( + "block_ru_utime_seconds", labels=["block_name"], + alternative_names=( + metrics.name_prefix + "_block_ru_utime:total", + ), ) -block_ru_stime = metrics.register_distribution( - "block_ru_stime", labels=["block_name"] +block_ru_stime = metrics.register_counter( + "block_ru_stime_seconds", labels=["block_name"], + alternative_names=( + metrics.name_prefix + "_block_ru_stime:total", + ), ) -block_db_txn_count = metrics.register_distribution( - "block_db_txn_count", labels=["block_name"] +block_db_txn_count = metrics.register_counter( + "block_db_txn_count", labels=["block_name"], + alternative_names=( + metrics.name_prefix + "_block_db_txn_count:total", + ), ) -block_db_txn_duration = metrics.register_distribution( - "block_db_txn_duration", labels=["block_name"] +block_db_txn_duration = metrics.register_counter( + "block_db_txn_duration_seconds", labels=["block_name"], + alternative_names=( + metrics.name_prefix + "_block_db_txn_count:total", + ), ) -- cgit 1.5.1 From 44a498418c62a835aae9bff8550f844888b3ab84 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 11 Jan 2018 22:40:51 +0000 Subject: Optimise LoggingContext creation and copying It turns out that the only thing we use the __dict__ of LoggingContext for is `request`, and given we create lots of LoggingContexts and then copy them every time we do a db transaction or log line, using the __dict__ seems a bit redundant. 
Let's try to optimise things by making the request attribute explicit. --- synapse/util/logcontext.py | 25 ++++++++++++++++++------- tests/crypto/test_keyring.py | 14 +++++++------- tests/util/test_logcontext.py | 16 ++++++++-------- 3 files changed, 33 insertions(+), 22 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 48c9f6802d..ca71a1fc27 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -52,13 +52,16 @@ except Exception: class LoggingContext(object): """Additional context for log formatting. Contexts are scoped within a "with" block. + Args: name (str): Name for the context for debugging. """ __slots__ = [ - "previous_context", "name", "usage_start", "usage_end", "main_thread", - "__dict__", "tag", "alive", + "previous_context", "name", "ru_stime", "ru_utime", + "db_txn_count", "db_txn_duration", "usage_start", "usage_end", + "main_thread", "alive", + "request", "tag", ] thread_local = threading.local() @@ -96,7 +99,9 @@ class LoggingContext(object): self.db_txn_count = 0 self.db_txn_duration = 0. self.usage_start = None + self.usage_end = None self.main_thread = threading.current_thread() + self.request = None self.tag = "" self.alive = True @@ -105,7 +110,11 @@ class LoggingContext(object): @classmethod def current_context(cls): - """Get the current logging context from thread local storage""" + """Get the current logging context from thread local storage + + Returns: + LoggingContext: the current logging context + """ return getattr(cls.thread_local, "current_context", cls.sentinel) @classmethod @@ -155,11 +164,13 @@ class LoggingContext(object): self.alive = False def copy_to(self, record): - """Copy fields from this context to the record""" - for key, value in self.__dict__.items(): - setattr(record, key, value) + """Copy logging fields from this context to a log record or + another LoggingContext + """ - record.ru_utime, record.ru_stime = self.get_resource_usage() + # 'request' is the only field we currently use in the logger, so that's + # all we need to copy + record.request = self.request def start(self): if threading.current_thread() is not self.main_thread: diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 570312da84..c899fecf5d 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -68,7 +68,7 @@ class KeyringTestCase(unittest.TestCase): def check_context(self, _, expected): self.assertEquals( - getattr(LoggingContext.current_context(), "test_key", None), + getattr(LoggingContext.current_context(), "request", None), expected ) @@ -82,7 +82,7 @@ class KeyringTestCase(unittest.TestCase): lookup_2_deferred = defer.Deferred() with LoggingContext("one") as context_one: - context_one.test_key = "one" + context_one.request = "one" wait_1_deferred = kr.wait_for_previous_lookups( ["server1"], @@ -96,7 +96,7 @@ class KeyringTestCase(unittest.TestCase): wait_1_deferred.addBoth(self.check_context, "one") with LoggingContext("two") as context_two: - context_two.test_key = "two" + context_two.request = "two" # set off another wait. It should block because the first lookup # hasn't yet completed. 
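# Editorial sketch (assumes `import logging` plus the LoggingContext
# from the diff above): with __slots__ and an explicit `request`
# attribute, copying a context onto a log record is a single
# assignment, which is why these tests switch from an ad-hoc test_key
# to `context.request`.
with LoggingContext("req-1") as ctx:
    ctx.request = "req-1"                 # illustrative request id
    record = logging.makeLogRecord({})
    ctx.copy_to(record)                   # copies only .request now
    assert record.request == "req-1"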
@@ -137,7 +137,7 @@ class KeyringTestCase(unittest.TestCase): @defer.inlineCallbacks def get_perspectives(**kwargs): self.assertEquals( - LoggingContext.current_context().test_key, "11", + LoggingContext.current_context().request, "11", ) with logcontext.PreserveLoggingContext(): yield persp_deferred @@ -145,7 +145,7 @@ class KeyringTestCase(unittest.TestCase): self.http_client.post_json.side_effect = get_perspectives with LoggingContext("11") as context_11: - context_11.test_key = "11" + context_11.request = "11" # start off a first set of lookups res_deferreds = kr.verify_json_objects_for_server( @@ -173,7 +173,7 @@ class KeyringTestCase(unittest.TestCase): self.assertIs(LoggingContext.current_context(), context_11) context_12 = LoggingContext("12") - context_12.test_key = "12" + context_12.request = "12" with logcontext.PreserveLoggingContext(context_12): # a second request for a server with outstanding requests # should block rather than start a second call @@ -211,7 +211,7 @@ class KeyringTestCase(unittest.TestCase): sentinel_context = LoggingContext.current_context() with LoggingContext("one") as context_one: - context_one.test_key = "one" + context_one.request = "one" defer = kr.verify_json_for_server("server9", {}) try: diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py index e2f7765f49..4850722bc5 100644 --- a/tests/util/test_logcontext.py +++ b/tests/util/test_logcontext.py @@ -12,12 +12,12 @@ class LoggingContextTestCase(unittest.TestCase): def _check_test_key(self, value): self.assertEquals( - LoggingContext.current_context().test_key, value + LoggingContext.current_context().request, value ) def test_with_context(self): with LoggingContext() as context_one: - context_one.test_key = "test" + context_one.request = "test" self._check_test_key("test") @defer.inlineCallbacks @@ -25,14 +25,14 @@ class LoggingContextTestCase(unittest.TestCase): @defer.inlineCallbacks def competing_callback(): with LoggingContext() as competing_context: - competing_context.test_key = "competing" + competing_context.request = "competing" yield sleep(0) self._check_test_key("competing") reactor.callLater(0, competing_callback) with LoggingContext() as context_one: - context_one.test_key = "one" + context_one.request = "one" yield sleep(0) self._check_test_key("one") @@ -43,14 +43,14 @@ class LoggingContextTestCase(unittest.TestCase): @defer.inlineCallbacks def cb(): - context_one.test_key = "one" + context_one.request = "one" yield function() self._check_test_key("one") callback_completed[0] = True with LoggingContext() as context_one: - context_one.test_key = "one" + context_one.request = "one" # fire off function, but don't wait on it. logcontext.preserve_fn(cb)() @@ -107,7 +107,7 @@ class LoggingContextTestCase(unittest.TestCase): sentinel_context = LoggingContext.current_context() with LoggingContext() as context_one: - context_one.test_key = "one" + context_one.request = "one" d1 = logcontext.make_deferred_yieldable(blocking_function()) # make sure that the context was reset by make_deferred_yieldable @@ -124,7 +124,7 @@ class LoggingContextTestCase(unittest.TestCase): argument isn't actually a deferred""" with LoggingContext() as context_one: - context_one.test_key = "one" + context_one.request = "one" d1 = logcontext.make_deferred_yieldable("bum") self._check_test_key("one") -- cgit 1.5.1 From 6324b65f08b3f8dbfee6fef0079e2a87cb1c2c85 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 11 Jan 2018 18:17:54 +0000 Subject: Track db txn time in millisecs ... 
to reduce the amount of floating-point foo we do. --- synapse/http/server.py | 4 +++- synapse/http/site.py | 6 +++--- synapse/util/logcontext.py | 9 ++++++--- synapse/util/metrics.py | 8 +++++--- 4 files changed, 17 insertions(+), 10 deletions(-) (limited to 'synapse/util') diff --git a/synapse/http/server.py b/synapse/http/server.py index 269b65ca41..0f30e6fd56 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -93,6 +93,8 @@ response_db_txn_count = metrics.register_counter( ), ) +# seconds spent waiting for db txns, excluding scheduling time, when processing +# this request response_db_txn_duration = metrics.register_counter( "response_db_txn_duration_seconds", labels=["method", "servlet", "tag"], alternative_names=( @@ -377,7 +379,7 @@ class RequestMetrics(object): context.db_txn_count, request.method, self.name, tag ) response_db_txn_duration.inc_by( - context.db_txn_duration, request.method, self.name, tag + context.db_txn_duration_ms / 1000., request.method, self.name, tag ) diff --git a/synapse/http/site.py b/synapse/http/site.py index cd1492b1c3..dc64f0f6f5 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -66,10 +66,10 @@ class SynapseRequest(Request): context = LoggingContext.current_context() ru_utime, ru_stime = context.get_resource_usage() db_txn_count = context.db_txn_count - db_txn_duration = context.db_txn_duration + db_txn_duration_ms = context.db_txn_duration_ms except Exception: ru_utime, ru_stime = (0, 0) - db_txn_count, db_txn_duration = (0, 0) + db_txn_count, db_txn_duration_ms = (0, 0) self.site.access_logger.info( "%s - %s - {%s}" @@ -81,7 +81,7 @@ class SynapseRequest(Request): int(time.time() * 1000) - self.start_time, int(ru_utime * 1000), int(ru_stime * 1000), - int(db_txn_duration * 1000), + db_txn_duration_ms, int(db_txn_count), self.sentLength, self.code, diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index ca71a1fc27..a78e53812f 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -59,7 +59,7 @@ class LoggingContext(object): __slots__ = [ "previous_context", "name", "ru_stime", "ru_utime", - "db_txn_count", "db_txn_duration", "usage_start", "usage_end", + "db_txn_count", "db_txn_duration_ms", "usage_start", "usage_end", "main_thread", "alive", "request", "tag", ] @@ -97,7 +97,10 @@ class LoggingContext(object): self.ru_stime = 0. self.ru_utime = 0. self.db_txn_count = 0 - self.db_txn_duration = 0. + + # ms spent waiting for db txns, excluding scheduling time + self.db_txn_duration_ms = 0 + self.usage_start = None self.usage_end = None self.main_thread = threading.current_thread() @@ -205,7 +208,7 @@ class LoggingContext(object): def add_database_transaction(self, duration_ms): self.db_txn_count += 1 - self.db_txn_duration += duration_ms / 1000. 
+ self.db_txn_duration_ms += duration_ms class LoggingContextFilter(logging.Filter): diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 8d22ff3068..d25629cc50 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -72,6 +72,7 @@ block_db_txn_count = metrics.register_counter( ), ) +# seconds spent waiting for db txns, excluding scheduling time, in this block block_db_txn_duration = metrics.register_counter( "block_db_txn_duration_seconds", labels=["block_name"], alternative_names=( @@ -95,7 +96,7 @@ def measure_func(name): class Measure(object): __slots__ = [ "clock", "name", "start_context", "start", "new_context", "ru_utime", - "ru_stime", "db_txn_count", "db_txn_duration", "created_context" + "ru_stime", "db_txn_count", "db_txn_duration_ms", "created_context" ] def __init__(self, clock, name): @@ -115,7 +116,7 @@ class Measure(object): self.ru_utime, self.ru_stime = self.start_context.get_resource_usage() self.db_txn_count = self.start_context.db_txn_count - self.db_txn_duration = self.start_context.db_txn_duration + self.db_txn_duration_ms = self.start_context.db_txn_duration_ms def __exit__(self, exc_type, exc_val, exc_tb): if isinstance(exc_type, Exception) or not self.start_context: @@ -145,7 +146,8 @@ class Measure(object): context.db_txn_count - self.db_txn_count, self.name ) block_db_txn_duration.inc_by( - context.db_txn_duration - self.db_txn_duration, self.name + (context.db_txn_duration_ms - self.db_txn_duration_ms) / 1000., + self.name ) if self.created_context: -- cgit 1.5.1 From 3d12d97415ac6d6a4ab8188af31c7df12c5d19f8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 12 Jan 2018 00:27:14 +0000 Subject: Track DB scheduling delay per-request For each request, track the amount of time spent waiting for a db connection. This entails adding it to the LoggingContext and we may as well add metrics for it while we are passing. 
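(Editorial aside: the shape of the measurement, reconstructed from the synapse/storage/_base.py hunk below; start_time is captured when the transaction is queued on the connection pool.)

    start_time = time.time() * 1000

    def inner_func(conn, *args, **kwargs):
        # The gap between asking the pool for a connection and the pool
        # actually running us is the scheduling delay.
        sched_duration_ms = time.time() * 1000 - start_time
        sql_scheduling_timer.inc_by(sched_duration_ms)
        current_context.add_database_scheduled(sched_duration_ms)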
--- synapse/http/server.py | 7 +++++++ synapse/http/site.py | 4 +++- synapse/storage/_base.py | 4 +++- synapse/util/logcontext.py | 18 +++++++++++++++++- synapse/util/metrics.py | 14 +++++++++++++- 5 files changed, 43 insertions(+), 4 deletions(-) (limited to 'synapse/util') diff --git a/synapse/http/server.py b/synapse/http/server.py index 0f30e6fd56..7b6418bc2c 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -102,6 +102,10 @@ response_db_txn_duration = metrics.register_counter( ), ) +# seconds spent waiting for a db connection, when processing this request +response_db_sched_duration = metrics.register_counter( + "response_db_sched_duration_seconds", labels=["method", "servlet", "tag"] +) _next_request_id = 0 @@ -381,6 +385,9 @@ class RequestMetrics(object): response_db_txn_duration.inc_by( context.db_txn_duration_ms / 1000., request.method, self.name, tag ) + response_db_sched_duration.inc_by( + context.db_sched_duration_ms / 1000., request.method, self.name, tag + ) class RootRedirect(resource.Resource): diff --git a/synapse/http/site.py b/synapse/http/site.py index dc64f0f6f5..e422c8dfae 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -67,13 +67,14 @@ class SynapseRequest(Request): ru_utime, ru_stime = context.get_resource_usage() db_txn_count = context.db_txn_count db_txn_duration_ms = context.db_txn_duration_ms + db_sched_duration_ms = context.db_sched_duration_ms except Exception: ru_utime, ru_stime = (0, 0) db_txn_count, db_txn_duration_ms = (0, 0) self.site.access_logger.info( "%s - %s - {%s}" - " Processed request: %dms (%dms, %dms) (%dms/%d)" + " Processed request: %dms (%dms, %dms) (%dms/%dms/%d)" " %sB %s \"%s %s %s\" \"%s\"", self.getClientIP(), self.site.site_tag, @@ -81,6 +82,7 @@ class SynapseRequest(Request): int(time.time() * 1000) - self.start_time, int(ru_utime * 1000), int(ru_stime * 1000), + db_sched_duration_ms, db_txn_duration_ms, int(db_txn_count), self.sentLength, diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 986617674c..68125006eb 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -347,7 +347,9 @@ class SQLBaseStore(object): def inner_func(conn, *args, **kwargs): with LoggingContext("runWithConnection") as context: - sql_scheduling_timer.inc_by(time.time() * 1000 - start_time) + sched_duration_ms = time.time() * 1000 - start_time + sql_scheduling_timer.inc_by(sched_duration_ms) + current_context.add_database_scheduled(sched_duration_ms) if self.database_engine.is_connection_closed(conn): logger.debug("Reconnecting closed database connection") diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index a78e53812f..94fa7cac98 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -59,7 +59,8 @@ class LoggingContext(object): __slots__ = [ "previous_context", "name", "ru_stime", "ru_utime", - "db_txn_count", "db_txn_duration_ms", "usage_start", "usage_end", + "db_txn_count", "db_txn_duration_ms", "db_sched_duration_ms", + "usage_start", "usage_end", "main_thread", "alive", "request", "tag", ] @@ -86,6 +87,9 @@ class LoggingContext(object): def add_database_transaction(self, duration_ms): pass + def add_database_scheduled(self, sched_ms): + pass + def __nonzero__(self): return False @@ -101,6 +105,9 @@ class LoggingContext(object): # ms spent waiting for db txns, excluding scheduling time self.db_txn_duration_ms = 0 + # ms spent waiting for db txns to be scheduled + self.db_sched_duration_ms = 0 + self.usage_start = None self.usage_end = None 
self.main_thread = threading.current_thread() @@ -210,6 +217,15 @@ class LoggingContext(object): self.db_txn_count += 1 self.db_txn_duration_ms += duration_ms + def add_database_scheduled(self, sched_ms): + """Record a use of the database pool + + Args: + sched_ms (int): number of milliseconds it took us to get a + connection + """ + self.db_sched_duration_ms += sched_ms + class LoggingContextFilter(logging.Filter): """Logging filter that adds values from the current logging context to each diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index d25629cc50..059bb7fedf 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -80,6 +80,11 @@ block_db_txn_duration = metrics.register_counter( ), ) +# seconds spent waiting for a db connection, in this block +block_db_sched_duration = metrics.register_counter( + "block_db_sched_duration_seconds", labels=["block_name"], +) + def measure_func(name): def wrapper(func): @@ -96,7 +101,9 @@ def measure_func(name): class Measure(object): __slots__ = [ "clock", "name", "start_context", "start", "new_context", "ru_utime", - "ru_stime", "db_txn_count", "db_txn_duration_ms", "created_context" + "ru_stime", + "db_txn_count", "db_txn_duration_ms", "db_sched_duration_ms", + "created_context", ] def __init__(self, clock, name): @@ -117,6 +124,7 @@ class Measure(object): self.ru_utime, self.ru_stime = self.start_context.get_resource_usage() self.db_txn_count = self.start_context.db_txn_count self.db_txn_duration_ms = self.start_context.db_txn_duration_ms + self.db_sched_duration_ms = self.start_context.db_sched_duration_ms def __exit__(self, exc_type, exc_val, exc_tb): if isinstance(exc_type, Exception) or not self.start_context: @@ -149,6 +157,10 @@ class Measure(object): (context.db_txn_duration_ms - self.db_txn_duration_ms) / 1000., self.name ) + block_db_sched_duration.inc_by( + (context.db_sched_duration_ms - self.db_sched_duration_ms) / 1000., + self.name + ) if self.created_context: self.start_context.__exit__(exc_type, exc_val, exc_tb) -- cgit 1.5.1 From bc67e7d260631d3fa7bc78653376e15dc0771364 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 17 Jan 2018 16:43:03 +0000 Subject: Add decent impl of a FileConsumer Twisted core doesn't have a general purpose one, so we need to write one ourselves. Features: - All writing happens in background thread - Supports both push and pull producers - Push producers get paused if the consumer falls behind --- synapse/util/file_consumer.py | 158 +++++++++++++++++++++++++++++++++++++++ tests/util/test_file_consumer.py | 138 ++++++++++++++++++++++++++++++++++ 2 files changed, 296 insertions(+) create mode 100644 synapse/util/file_consumer.py create mode 100644 tests/util/test_file_consumer.py (limited to 'synapse/util') diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py new file mode 100644 index 0000000000..de478fcb3e --- /dev/null +++ b/synapse/util/file_consumer.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vecotr Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
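# Editorial sketch (assumed; a simplification of the class that
# follows): the essence of the design is a queue drained by a single
# background thread.
def _writer_loop(file_obj, bytes_queue):
    while True:
        chunk = bytes_queue.get()
        if chunk is None:          # sentinel sent by unregisterProducer
            break
        file_obj.write(chunk)
    file_obj.close()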
+# See the License for the specific language governing permissions and +# limitations under the License. + +from twisted.internet import defer, threads, reactor + +from synapse.util.logcontext import make_deferred_yieldable + +import Queue + + +class BackgroundFileConsumer(object): + """A consumer that writes to a file like object. Supports both push + and pull producers + + Args: + file_obj (file): The file like object to write to. Closed when + finished. + """ + + # For PushProducers pause if we have this many unwritten slices + _PAUSE_ON_QUEUE_SIZE = 5 + # And resume once the size of the queue is less than this + _RESUME_ON_QUEUE_SIZE = 2 + + def __init__(self, file_obj): + self.file_obj = file_obj + + # Producer we're registered with + self.producer = None + + # True if PushProducer, false if PullProducer + self.streaming = False + + # Queue of slices of bytes to be written. When producer calls + # unregister a final None is sent. + self.bytes_queue = Queue.Queue() + + # Deferred that is resolved when finished writing + self.finished_deferred = None + + # If the _writer thread throws an exception it gets stored here. + self._write_exception = None + + # A deferred that gets resolved when the bytes_queue gets empty. + # Mainly used for tests. + self._notify_empty_deferred = None + + def registerProducer(self, producer, streaming): + """Part of IProducer interface + + Args: + producer (IProducer) + streaming (bool): True if push based producer, False if pull + based. + """ + self.producer = producer + self.streaming = streaming + self.finished_deferred = threads.deferToThread(self._writer) + if not streaming: + self.producer.resumeProducing() + + self.paused_producer = False + + def unregisterProducer(self): + """Part of IProducer interface + """ + self.producer = None + if not self.finished_deferred.called: + self.bytes_queue.put_nowait(None) + + def write(self, bytes): + """Part of IProducer interface + """ + if self._write_exception: + raise self._write_exception + + if self.finished_deferred.called: + raise Exception("consumer has closed") + + self.bytes_queue.put_nowait(bytes) + + # If this is a pushed based consumer and the queue is getting behind + # then we pause the producer. + if self.streaming and self.bytes_queue.qsize() >= self._PAUSE_ON_QUEUE_SIZE: + self.paused_producer = True + self.producer.pauseProducing() + + def _writer(self): + """This is run in a background thread to write to the file. + """ + try: + while self.producer or not self.bytes_queue.empty(): + # If we've paused the producer check if we should resume the + # producer. + if self.producer and self.paused_producer: + if self.bytes_queue.qsize() <= self._RESUME_ON_QUEUE_SIZE: + reactor.callFromThread(self._resume_paused_producer) + + if self._notify_empty and self.bytes_queue.empty(): + reactor.callFromThread(self._notify_empty) + + bytes = self.bytes_queue.get() + + # If we get a None (or empty list) then that's a signal used + # to indicate we should check if we should stop. + if bytes: + self.file_obj.write(bytes) + + # If its a pull producer then we need to explicitly ask for + # more stuff. 
+ if not self.streaming and self.producer: + reactor.callFromThread(self.producer.resumeProducing) + except Exception as e: + self._write_exception = e + raise + finally: + self.file_obj.close() + + def wait(self): + """Returns a deferred that resolves when finished writing to file + """ + return make_deferred_yieldable(self.finished_deferred) + + def _resume_paused_producer(self): + """Gets called if we should resume producing after being paused + """ + if self.paused_producer and self.producer: + self.paused_producer = False + self.producer.resumeProducing() + + def _notify_empty(self): + """Called when the _writer thread thinks the queue may be empty and + we should notify anything waiting on `wait_for_writes` + """ + if self._notify_empty_deferred and self.bytes_queue.empty(): + d = self._notify_empty_deferred + self._notify_empty_deferred = None + d.callback(None) + + def wait_for_writes(self): + """Wait for the write queue to be empty and for writes to have + finished. This is mainly useful for tests. + """ + if not self._notify_empty_deferred: + self._notify_empty_deferred = defer.Deferred() + return self._notify_empty_deferred diff --git a/tests/util/test_file_consumer.py b/tests/util/test_file_consumer.py new file mode 100644 index 0000000000..8acb68f0c3 --- /dev/null +++ b/tests/util/test_file_consumer.py @@ -0,0 +1,138 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
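# Editorial usage sketch ahead of the tests (assumed, not from the
# patch): the consumer implements Twisted's IConsumer, so a producer is
# registered against it and the writes land on a background thread.
@defer.inlineCallbacks
def stream_to_disk(producer, path):
    consumer = BackgroundFileConsumer(open(path, "wb"))
    consumer.registerProducer(producer, True)   # True => push producer
    # ... the producer feeds consumer.write(data) as bytes arrive ...
    consumer.unregisterProducer()
    yield consumer.wait()   # resolves once the file is written and closed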
+ + +from twisted.internet import defer +from mock import NonCallableMock + +from synapse.util.file_consumer import BackgroundFileConsumer + +from tests import unittest +from StringIO import StringIO + +import threading + + +class FileConsumerTests(unittest.TestCase): + + @defer.inlineCallbacks + def test_pull_consumer(self): + string_file = StringIO() + consumer = BackgroundFileConsumer(string_file) + + try: + producer = DummyPullProducer() + + yield producer.register_with_consumer(consumer) + + yield producer.write_and_wait("Foo") + + self.assertEqual(string_file.getvalue(), "Foo") + + yield producer.write_and_wait("Bar") + + self.assertEqual(string_file.getvalue(), "FooBar") + finally: + consumer.unregisterProducer() + + yield consumer.wait() + + self.assertTrue(string_file.closed) + + @defer.inlineCallbacks + def test_push_consumer(self): + string_file = StringIO() + consumer = BackgroundFileConsumer(string_file) + + try: + producer = NonCallableMock(spec_set=[]) + + consumer.registerProducer(producer, True) + + consumer.write("Foo") + yield consumer.wait_for_writes() + + self.assertEqual(string_file.getvalue(), "Foo") + + consumer.write("Bar") + yield consumer.wait_for_writes() + + self.assertEqual(string_file.getvalue(), "FooBar") + finally: + consumer.unregisterProducer() + + yield consumer.wait() + + self.assertTrue(string_file.closed) + + @defer.inlineCallbacks + def test_push_producer_feedback(self): + string_file = BlockingStringWrite() + consumer = BackgroundFileConsumer(string_file) + + try: + producer = NonCallableMock(spec_set=["pauseProducing", "resumeProducing"]) + + consumer.registerProducer(producer, True) + + with string_file.write_lock: + for _ in range(consumer._PAUSE_ON_QUEUE_SIZE): + consumer.write("Foo") + + producer.pauseProducing.assert_called_once() + + yield consumer.wait_for_writes() + producer.resumeProducing.assert_called_once() + finally: + consumer.unregisterProducer() + + yield consumer.wait() + + self.assertTrue(string_file.closed) + + +class DummyPullProducer(object): + def __init__(self): + self.consumer = None + self.deferred = defer.Deferred() + + def resumeProducing(self): + d = self.deferred + self.deferred = defer.Deferred() + d.callback(None) + + def write_and_wait(self, bytes): + d = self.deferred + self.consumer.write(bytes) + return d + + def register_with_consumer(self, consumer): + d = self.deferred + self.consumer = consumer + self.consumer.registerProducer(self, False) + return d + + +class BlockingStringWrite(object): + def __init__(self): + self.buffer = "" + self.closed = False + self.write_lock = threading.Lock() + + def write(self, bytes): + self.buffer += bytes + + def close(self): + self.closed = True -- cgit 1.5.1 From a177325b49be4793c8ed21147f8d301a0649a2b6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Jan 2018 11:02:43 +0000 Subject: Fix comments --- synapse/util/file_consumer.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py index de478fcb3e..5284c7967e 100644 --- a/synapse/util/file_consumer.py +++ b/synapse/util/file_consumer.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2018 New Vecotr Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -58,7 +58,7 @@ class BackgroundFileConsumer(object): self._notify_empty_deferred = None def registerProducer(self, producer, streaming): - """Part of IProducer interface + """Part of IConsumer interface Args: producer (IProducer) @@ -91,7 +91,7 @@ class BackgroundFileConsumer(object): self.bytes_queue.put_nowait(bytes) - # If this is a pushed based consumer and the queue is getting behind + # If this is a PushProducer and the queue is getting behind # then we pause the producer. if self.streaming and self.bytes_queue.qsize() >= self._PAUSE_ON_QUEUE_SIZE: self.paused_producer = True -- cgit 1.5.1 From 28b338ed9bafc2017a635848e14a2a25b78d0016 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Jan 2018 11:04:41 +0000 Subject: Move definition of paused_producer to __init__ --- synapse/util/file_consumer.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py index 5284c7967e..54c9da9573 100644 --- a/synapse/util/file_consumer.py +++ b/synapse/util/file_consumer.py @@ -43,6 +43,10 @@ class BackgroundFileConsumer(object): # True if PushProducer, false if PullProducer self.streaming = False + # For PushProducers, indicates whether we've paused the producer and + # need to call resumeProducing before we get more data. + self.paused_producer = False + # Queue of slices of bytes to be written. When producer calls # unregister a final None is sent. self.bytes_queue = Queue.Queue() @@ -71,8 +75,6 @@ class BackgroundFileConsumer(object): if not streaming: self.producer.resumeProducing() - self.paused_producer = False - def unregisterProducer(self): """Part of IProducer interface """ -- cgit 1.5.1 From 17b54389feb3855a33406149a8a59f0327bb3ad1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Jan 2018 11:05:34 +0000 Subject: Fix _notify_empty typo --- synapse/util/file_consumer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/util') diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py index 54c9da9573..479e480614 100644 --- a/synapse/util/file_consumer.py +++ b/synapse/util/file_consumer.py @@ -110,7 +110,7 @@ class BackgroundFileConsumer(object): if self.bytes_queue.qsize() <= self._RESUME_ON_QUEUE_SIZE: reactor.callFromThread(self._resume_paused_producer) - if self._notify_empty and self.bytes_queue.empty(): + if self._notify_empty_deferred and self.bytes_queue.empty(): reactor.callFromThread(self._notify_empty) bytes = self.bytes_queue.get() -- cgit 1.5.1 From dc519602ac0f35d39a70c91f0e6057e865a61dfc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Jan 2018 11:07:17 +0000 Subject: Ensure we registerProducer isn't called twice --- synapse/util/file_consumer.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'synapse/util') diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py index 479e480614..d7bbb0aeb8 100644 --- a/synapse/util/file_consumer.py +++ b/synapse/util/file_consumer.py @@ -69,6 +69,9 @@ class BackgroundFileConsumer(object): streaming (bool): True if push based producer, False if pull based. 
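# Editorial sketch of the guard this commit introduces (the raise is
# shown immediately below); producer_a and producer_b are placeholders.
consumer.registerProducer(producer_a, True)
try:
    consumer.registerProducer(producer_b, True)
except Exception:
    pass   # "registerProducer called twice"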
""" + if self.producer: + raise Exception("registerProducer called twice") + self.producer = producer self.streaming = streaming self.finished_deferred = threads.deferToThread(self._writer) -- cgit 1.5.1 From 2f18a2647b6b9cc07c3cc5f2bec3e1bab67d0eea Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Jan 2018 11:10:12 +0000 Subject: Make all fields private --- synapse/util/file_consumer.py | 62 +++++++++++++++++++++---------------------- 1 file changed, 31 insertions(+), 31 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py index d7bbb0aeb8..d19d48665c 100644 --- a/synapse/util/file_consumer.py +++ b/synapse/util/file_consumer.py @@ -35,24 +35,24 @@ class BackgroundFileConsumer(object): _RESUME_ON_QUEUE_SIZE = 2 def __init__(self, file_obj): - self.file_obj = file_obj + self._file_obj = file_obj # Producer we're registered with - self.producer = None + self._producer = None # True if PushProducer, false if PullProducer self.streaming = False # For PushProducers, indicates whether we've paused the producer and # need to call resumeProducing before we get more data. - self.paused_producer = False + self._paused_producer = False # Queue of slices of bytes to be written. When producer calls # unregister a final None is sent. - self.bytes_queue = Queue.Queue() + self._bytes_queue = Queue.Queue() # Deferred that is resolved when finished writing - self.finished_deferred = None + self._finished_deferred = None # If the _writer thread throws an exception it gets stored here. self._write_exception = None @@ -69,21 +69,21 @@ class BackgroundFileConsumer(object): streaming (bool): True if push based producer, False if pull based. """ - if self.producer: + if self._producer: raise Exception("registerProducer called twice") - self.producer = producer + self._producer = producer self.streaming = streaming - self.finished_deferred = threads.deferToThread(self._writer) + self._finished_deferred = threads.deferToThread(self._writer) if not streaming: - self.producer.resumeProducing() + self._producer.resumeProducing() def unregisterProducer(self): """Part of IProducer interface """ - self.producer = None - if not self.finished_deferred.called: - self.bytes_queue.put_nowait(None) + self._producer = None + if not self._finished_deferred.called: + self._bytes_queue.put_nowait(None) def write(self, bytes): """Part of IProducer interface @@ -91,65 +91,65 @@ class BackgroundFileConsumer(object): if self._write_exception: raise self._write_exception - if self.finished_deferred.called: + if self._finished_deferred.called: raise Exception("consumer has closed") - self.bytes_queue.put_nowait(bytes) + self._bytes_queue.put_nowait(bytes) # If this is a PushProducer and the queue is getting behind # then we pause the producer. - if self.streaming and self.bytes_queue.qsize() >= self._PAUSE_ON_QUEUE_SIZE: - self.paused_producer = True - self.producer.pauseProducing() + if self.streaming and self._bytes_queue.qsize() >= self._PAUSE_ON_QUEUE_SIZE: + self._paused_producer = True + self._producer.pauseProducing() def _writer(self): """This is run in a background thread to write to the file. """ try: - while self.producer or not self.bytes_queue.empty(): + while self._producer or not self._bytes_queue.empty(): # If we've paused the producer check if we should resume the # producer. 
- if self.producer and self.paused_producer: - if self.bytes_queue.qsize() <= self._RESUME_ON_QUEUE_SIZE: + if self._producer and self._paused_producer: + if self._bytes_queue.qsize() <= self._RESUME_ON_QUEUE_SIZE: reactor.callFromThread(self._resume_paused_producer) - if self._notify_empty_deferred and self.bytes_queue.empty(): + if self._notify_empty_deferred and self._bytes_queue.empty(): reactor.callFromThread(self._notify_empty) - bytes = self.bytes_queue.get() + bytes = self._bytes_queue.get() # If we get a None (or empty list) then that's a signal used # to indicate we should check if we should stop. if bytes: - self.file_obj.write(bytes) + self._file_obj.write(bytes) # If its a pull producer then we need to explicitly ask for # more stuff. - if not self.streaming and self.producer: - reactor.callFromThread(self.producer.resumeProducing) + if not self.streaming and self._producer: + reactor.callFromThread(self._producer.resumeProducing) except Exception as e: self._write_exception = e raise finally: - self.file_obj.close() + self._file_obj.close() def wait(self): """Returns a deferred that resolves when finished writing to file """ - return make_deferred_yieldable(self.finished_deferred) + return make_deferred_yieldable(self._finished_deferred) def _resume_paused_producer(self): """Gets called if we should resume producing after being paused """ - if self.paused_producer and self.producer: - self.paused_producer = False - self.producer.resumeProducing() + if self._paused_producer and self._producer: + self._paused_producer = False + self._producer.resumeProducing() def _notify_empty(self): """Called when the _writer thread thinks the queue may be empty and we should notify anything waiting on `wait_for_writes` """ - if self._notify_empty_deferred and self.bytes_queue.empty(): + if self._notify_empty_deferred and self._bytes_queue.empty(): d = self._notify_empty_deferred self._notify_empty_deferred = None d.callback(None) -- cgit 1.5.1 From 1432f7ccd5a01e43d0c5417f3d2f4a6a0fbf5bfb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Jan 2018 11:53:21 +0000 Subject: Move test stuff to tests --- synapse/util/file_consumer.py | 26 +------------------ tests/util/test_file_consumer.py | 54 ++++++++++++++++++++++++++++++++++------ 2 files changed, 47 insertions(+), 33 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py index d19d48665c..3241035247 100644 --- a/synapse/util/file_consumer.py +++ b/synapse/util/file_consumer.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer, threads, reactor +from twisted.internet import threads, reactor from synapse.util.logcontext import make_deferred_yieldable @@ -57,10 +57,6 @@ class BackgroundFileConsumer(object): # If the _writer thread throws an exception it gets stored here. self._write_exception = None - # A deferred that gets resolved when the bytes_queue gets empty. - # Mainly used for tests. 
- self._notify_empty_deferred = None - def registerProducer(self, producer, streaming): """Part of IConsumer interface @@ -113,9 +109,6 @@ class BackgroundFileConsumer(object): if self._bytes_queue.qsize() <= self._RESUME_ON_QUEUE_SIZE: reactor.callFromThread(self._resume_paused_producer) - if self._notify_empty_deferred and self._bytes_queue.empty(): - reactor.callFromThread(self._notify_empty) - bytes = self._bytes_queue.get() # If we get a None (or empty list) then that's a signal used @@ -144,20 +137,3 @@ class BackgroundFileConsumer(object): if self._paused_producer and self._producer: self._paused_producer = False self._producer.resumeProducing() - - def _notify_empty(self): - """Called when the _writer thread thinks the queue may be empty and - we should notify anything waiting on `wait_for_writes` - """ - if self._notify_empty_deferred and self._bytes_queue.empty(): - d = self._notify_empty_deferred - self._notify_empty_deferred = None - d.callback(None) - - def wait_for_writes(self): - """Wait for the write queue to be empty and for writes to have - finished. This is mainly useful for tests. - """ - if not self._notify_empty_deferred: - self._notify_empty_deferred = defer.Deferred() - return self._notify_empty_deferred diff --git a/tests/util/test_file_consumer.py b/tests/util/test_file_consumer.py index 8acb68f0c3..76e2234255 100644 --- a/tests/util/test_file_consumer.py +++ b/tests/util/test_file_consumer.py @@ -14,7 +14,7 @@ # limitations under the License. -from twisted.internet import defer +from twisted.internet import defer, reactor from mock import NonCallableMock from synapse.util.file_consumer import BackgroundFileConsumer @@ -53,7 +53,7 @@ class FileConsumerTests(unittest.TestCase): @defer.inlineCallbacks def test_push_consumer(self): - string_file = StringIO() + string_file = BlockingStringWrite() consumer = BackgroundFileConsumer(string_file) try: @@ -62,14 +62,14 @@ class FileConsumerTests(unittest.TestCase): consumer.registerProducer(producer, True) consumer.write("Foo") - yield consumer.wait_for_writes() + yield string_file.wait_for_n_writes(1) - self.assertEqual(string_file.getvalue(), "Foo") + self.assertEqual(string_file.buffer, "Foo") consumer.write("Bar") - yield consumer.wait_for_writes() + yield string_file.wait_for_n_writes(2) - self.assertEqual(string_file.getvalue(), "FooBar") + self.assertEqual(string_file.buffer, "FooBar") finally: consumer.unregisterProducer() @@ -85,15 +85,22 @@ class FileConsumerTests(unittest.TestCase): try: producer = NonCallableMock(spec_set=["pauseProducing", "resumeProducing"]) + resume_deferred = defer.Deferred() + producer.resumeProducing.side_effect = lambda: resume_deferred.callback(None) + consumer.registerProducer(producer, True) + number_writes = 0 with string_file.write_lock: for _ in range(consumer._PAUSE_ON_QUEUE_SIZE): consumer.write("Foo") + number_writes += 1 producer.pauseProducing.assert_called_once() - yield consumer.wait_for_writes() + yield string_file.wait_for_n_writes(number_writes) + + yield resume_deferred producer.resumeProducing.assert_called_once() finally: consumer.unregisterProducer() @@ -131,8 +138,39 @@ class BlockingStringWrite(object): self.closed = False self.write_lock = threading.Lock() + self._notify_write_deferred = None + self._number_of_writes = 0 + def write(self, bytes): - self.buffer += bytes + with self.write_lock: + self.buffer += bytes + self._number_of_writes += 1 + + reactor.callFromThread(self._notify_write) def close(self): self.closed = True + + def _notify_write(self): + "Called 
by write to indicate a write happened" + with self.write_lock: + if not self._notify_write_deferred: + return + d = self._notify_write_deferred + self._notify_write_deferred = None + d.callback(None) + + @defer.inlineCallbacks + def wait_for_n_writes(self, n): + "Wait for n writes to have happened" + while True: + with self.write_lock: + if n <= self._number_of_writes: + return + + if not self._notify_write_deferred: + self._notify_write_deferred = defer.Deferred() + + d = self._notify_write_deferred + + yield d -- cgit 1.5.1 From be0dfcd4a29859f4c707c2b3cf1da38c5115d251 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Jan 2018 11:57:23 +0000 Subject: Do logcontexts correctly --- synapse/util/file_consumer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py index 3241035247..90a2608d6f 100644 --- a/synapse/util/file_consumer.py +++ b/synapse/util/file_consumer.py @@ -15,7 +15,7 @@ from twisted.internet import threads, reactor -from synapse.util.logcontext import make_deferred_yieldable +from synapse.util.logcontext import make_deferred_yieldable, preserve_fn import Queue @@ -70,7 +70,7 @@ class BackgroundFileConsumer(object): self._producer = producer self.streaming = streaming - self._finished_deferred = threads.deferToThread(self._writer) + self._finished_deferred = preserve_fn(threads.deferToThread)(self._writer) if not streaming: self._producer.resumeProducing() -- cgit 1.5.1 From d57765fc8a1b54dae001bbb97b2b529991292fbc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 18 Jan 2018 12:23:04 +0000 Subject: Fix bugs in block metrics ... which I introduced in #2785 --- synapse/util/metrics.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 059bb7fedf..e4b5687a4b 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -28,7 +28,7 @@ logger = logging.getLogger(__name__) metrics = synapse.metrics.get_metrics_for(__name__) # total number of times we have hit this block -response_count = metrics.register_counter( +block_counter = metrics.register_counter( "block_count", labels=["block_name"], alternative_names=( @@ -76,7 +76,7 @@ block_db_txn_count = metrics.register_counter( block_db_txn_duration = metrics.register_counter( "block_db_txn_duration_seconds", labels=["block_name"], alternative_names=( - metrics.name_prefix + "_block_db_txn_count:total", + metrics.name_prefix + "_block_db_txn_duration:total", ), ) @@ -131,6 +131,8 @@ class Measure(object): return duration = self.clock.time_msec() - self.start + + block_counter.inc(self.name) block_timer.inc_by(duration, self.name) context = LoggingContext.current_context() -- cgit 1.5.1 From 447f4f0d5f136dcadd5fdc286ded2d6e24a3f686 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Fri, 19 Jan 2018 15:33:55 +0000 Subject: rewrite based on PR feedback: * [ ] split config options into allowed_local_3pids and registrations_require_3pid * [ ] simplify and comment logic for picking registration flows * [ ] fix docstring and move check_3pid_allowed into a new util module * [ ] use check_3pid_allowed everywhere @erikjohnston PTAL --- synapse/config/registration.py | 12 +++-- synapse/handlers/register.py | 15 +++---- synapse/rest/client/v1/register.py | 20 +++------ synapse/rest/client/v2_alpha/_base.py | 21 --------- synapse/rest/client/v2_alpha/account.py | 3 +- 
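# Editorial sketch of the new helper named in the commit message above
# (synapse/util/threepids.py); this approximates its logic and is not
# the file's verbatim contents.
def check_3pid_allowed(hs, medium, address):
    # With no allowed_local_3pids configured, any 3PID may be used.
    if not hs.config.allowed_local_3pids:
        return True
    for constraint in hs.config.allowed_local_3pids:
        if medium == constraint['medium'] and re.match(constraint['pattern'], address):
            return True
    return False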
synapse/rest/client/v2_alpha/register.py | 75 +++++++++++++++----------------- synapse/util/threepids.py | 45 +++++++++++++++++++ 7 files changed, 102 insertions(+), 89 deletions(-) create mode 100644 synapse/util/threepids.py (limited to 'synapse/util') diff --git a/synapse/config/registration.py b/synapse/config/registration.py index e5e4f77872..336959094b 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -32,6 +32,7 @@ class RegistrationConfig(Config): ) self.registrations_require_3pid = config.get("registrations_require_3pid", []) + self.allowed_local_3pids = config.get("allowed_local_3pids", []) self.registration_shared_secret = config.get("registration_shared_secret") self.bcrypt_rounds = config.get("bcrypt_rounds", 12) @@ -53,11 +54,16 @@ class RegistrationConfig(Config): # Enable registration for new users. enable_registration: False - # Mandate that registrations require a 3PID which matches one or more - # of these 3PIDs. N.B. regexp escape backslashes are doubled (once for - # YAML and once for the regexp itself) + # The user must provide all of the below types of 3PID when registering. # # registrations_require_3pid: + # - email + # - msisdn + + # Mandate that users are only allowed to associate certain formats of + # 3PIDs with accounts on this server. + # + # allowed_local_3pids: # - medium: email # pattern: ".*@matrix\\.org" # - medium: email diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 157ebaf251..9021d4d57f 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -15,7 +15,6 @@ """Contains functions for registering clients.""" import logging -import re from twisted.internet import defer @@ -26,6 +25,7 @@ from synapse.http.client import CaptchaServerHttpClient from synapse import types from synapse.types import UserID from synapse.util.async import run_on_reactor +from synapse.util.threepids import check_3pid_allowed from ._base import BaseHandler logger = logging.getLogger(__name__) @@ -308,15 +308,10 @@ class RegistrationHandler(BaseHandler): logger.info("got threepid with medium '%s' and address '%s'", threepid['medium'], threepid['address']) - for constraint in self.hs.config.registrations_require_3pid: - if ( - constraint['medium'] == 'email' and - threepid['medium'] == 'email' and - re.match(constraint['pattern'], threepid['address']) - ): - raise RegistrationError( - 403, "Third party identifier is not allowed" - ) + if not check_3pid_allowed(self.hs, threepid['medium'], threepid['address']): + raise RegistrationError( + 403, "Third party identifier is not allowed" + ) @defer.inlineCallbacks def bind_emails(self, user_id, threepidCreds): diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py index f793542ad6..5c5fa8f7ab 100644 --- a/synapse/rest/client/v1/register.py +++ b/synapse/rest/client/v1/register.py @@ -71,22 +71,13 @@ class RegisterRestServlet(ClientV1RestServlet): def on_GET(self, request): - require_email = False - require_msisdn = False - for constraint in self.hs.config.registrations_require_3pid: - if constraint['medium'] == 'email': - require_email = True - elif constraint['medium'] == 'msisdn': - require_msisdn = True - else: - logger.warn( - "Unrecognised 3PID medium %s in registrations_require_3pid" % - constraint['medium'] - ) + require_email = 'email' in self.hs.config.registrations_require_3pid + require_msisdn = 'msisdn' in self.hs.config.registrations_require_3pid flows = [] if self.hs.config.enable_registration_captcha: - if 
require_email or not require_msisdn: + # only support the email-only flow if we don't require MSISDN 3PIDs + if not require_msisdn: flows.extend([ { "type": LoginType.RECAPTCHA, @@ -97,6 +88,7 @@ class RegisterRestServlet(ClientV1RestServlet): ] }, ]) + # only support 3PIDless registration if no 3PIDs are required if not require_email and not require_msisdn: flows.extend([ { @@ -105,6 +97,7 @@ class RegisterRestServlet(ClientV1RestServlet): } ]) else: + # only support the email-only flow if we don't require MSISDN 3PIDs if require_email or not require_msisdn: flows.extend([ { @@ -114,6 +107,7 @@ class RegisterRestServlet(ClientV1RestServlet): ] } ]) + # only support 3PIDless registration if no 3PIDs are required if not require_email and not require_msisdn: flows.extend([ { diff --git a/synapse/rest/client/v2_alpha/_base.py b/synapse/rest/client/v2_alpha/_base.py index b286ff0d95..77434937ff 100644 --- a/synapse/rest/client/v2_alpha/_base.py +++ b/synapse/rest/client/v2_alpha/_base.py @@ -60,27 +60,6 @@ def set_timeline_upper_limit(filter_json, filter_timeline_limit): filter_timeline_limit) -def check_3pid_allowed(hs, medium, address): - # check whether the HS has whitelisted the given 3PID - - allow = False - if hs.config.registrations_require_3pid: - for constraint in hs.config.registrations_require_3pid: - logger.debug("Checking 3PID %s (%s) against %s (%s)" % ( - address, medium, constraint['pattern'], constraint['medium'] - )) - if ( - medium == constraint['medium'] and - re.match(constraint['pattern'], address) - ): - allow = True - break - else: - allow = True - - return allow - - def interactive_auth_handler(orig): """Wraps an on_POST method to handle InteractiveAuthIncompleteErrors diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 2977ad439f..514bb37da1 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -26,7 +26,8 @@ from synapse.http.servlet import ( ) from synapse.util.async import run_on_reactor from synapse.util.msisdn import phone_number_to_msisdn -from ._base import client_v2_patterns, interactive_auth_handler, check_3pid_allowed +from synapse.util.threepids import check_3pid_allowed +from ._base import client_v2_patterns, interactive_auth_handler logger = logging.getLogger(__name__) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 898d8b133a..c3479e29de 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -26,11 +26,11 @@ from synapse.http.servlet import ( RestServlet, parse_json_object_from_request, assert_params_in_request, parse_string ) from synapse.util.msisdn import phone_number_to_msisdn +from synapse.util.threepids import check_3pid_allowed -from ._base import client_v2_patterns, interactive_auth_handler, check_3pid_allowed +from ._base import client_v2_patterns, interactive_auth_handler import logging -import re import hmac from hashlib import sha1 from synapse.util.async import run_on_reactor @@ -316,41 +316,41 @@ class RegisterRestServlet(RestServlet): if 'x_show_msisdn' in body and body['x_show_msisdn']: show_msisdn = True - require_email = False - require_msisdn = False - for constraint in self.hs.config.registrations_require_3pid: - if constraint['medium'] == 'email': - require_email = True - elif constraint['medium'] == 'msisdn': - require_msisdn = True - else: - logger.warn( - "Unrecognised 3PID medium %s in registrations_require_3pid" % - 
constraint['medium'] - ) + # FIXME: need a better error than "no auth flow found" for scenarios + # where we required 3PID for registration but the user didn't give one + require_email = 'email' in self.hs.config.registrations_require_3pid + require_msisdn = 'msisdn' in self.hs.config.registrations_require_3pid flows = [] if self.hs.config.enable_registration_captcha: + # only support 3PIDless registration if no 3PIDs are required if not require_email and not require_msisdn: flows.extend([[LoginType.RECAPTCHA]]) - if require_email or not require_msisdn: + # only support the email-only flow if we don't require MSISDN 3PIDs + if not require_msisdn: flows.extend([[LoginType.EMAIL_IDENTITY, LoginType.RECAPTCHA]]) if show_msisdn: - if not require_email or require_msisdn: + # only support the MSISDN-only flow if we don't require email 3PIDs + if not require_email: flows.extend([[LoginType.MSISDN, LoginType.RECAPTCHA]]) + # always let users provide both MSISDN & email flows.extend([ [LoginType.MSISDN, LoginType.EMAIL_IDENTITY, LoginType.RECAPTCHA], ]) else: + # only support 3PIDless registration if no 3PIDs are required if not require_email and not require_msisdn: flows.extend([[LoginType.DUMMY]]) - if require_email or not require_msisdn: + # only support the email-only flow if we don't require MSISDN 3PIDs + if not require_msisdn: flows.extend([[LoginType.EMAIL_IDENTITY]]) if show_msisdn: + # only support the MSISDN-only flow if we don't require email 3PIDs if not require_email or require_msisdn: flows.extend([[LoginType.MSISDN]]) + # always let users provide both MSISDN & email flows.extend([ [LoginType.MSISDN, LoginType.EMAIL_IDENTITY] ]) @@ -359,30 +359,23 @@ class RegisterRestServlet(RestServlet): flows, body, self.hs.get_ip_from_request(request) ) - # doublecheck that we're not trying to register an denied 3pid. - # the user-facing checks should already have happened when we requested - # a 3PID token to validate them in /register/email/requestToken etc - - for constraint in self.hs.config.registrations_require_3pid: - if ( - constraint['medium'] == 'email' and - auth_result and LoginType.EMAIL_IDENTITY in auth_result and - re.match( - constraint['pattern'], - auth_result[LoginType.EMAIL_IDENTITY].threepid.address - ) - ): - raise SynapseError( - 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED - ) - elif ( - constraint['medium'] == 'msisdn' and - auth_result and LoginType.MSISDN in auth_result and - re.match( - constraint['pattern'], - auth_result[LoginType.MSISDN].threepid.address - ) - ): + # Check that we're not trying to register a denied 3pid. + # + # the user-facing checks will probably already have happened in + # /register/email/requestToken when we requested a 3pid, but that's not + # guaranteed. 
+ + if ( + auth_result and + ( + LoginType.EMAIL_IDENTITY in auth_result or + LoginType.EMAIL_MSISDN in auth_result + ) + ): + medium = auth_result[LoginType.EMAIL_IDENTITY].threepid['medium'] + address = auth_result[LoginType.EMAIL_IDENTITY].threepid['address'] + + if not check_3pid_allowed(self.hs, medium, address): raise SynapseError( 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED ) diff --git a/synapse/util/threepids.py b/synapse/util/threepids.py new file mode 100644 index 0000000000..e921b97796 --- /dev/null +++ b/synapse/util/threepids.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import re + +logger = logging.getLogger(__name__) + + +def check_3pid_allowed(hs, medium, address): + """Checks whether a given format of 3PID is allowed to be used on this HS + + Args: + hs (synapse.server.HomeServer): server + medium (str): 3pid medium - e.g. email, msisdn + address (str): address within that medium (e.g. "wotan@matrix.org") + msisdns need to first have been canonicalised + """ + + if hs.config.allowed_local_3pids: + for constraint in hs.config.allowed_local_3pids: + logger.debug("Checking 3PID %s (%s) against %s (%s)" % ( + address, medium, constraint['pattern'], constraint['medium'] + )) + if ( + medium == constraint['medium'] and + re.match(constraint['pattern'], address) + ): + return True + else: + return True + + return False -- cgit 1.5.1 From 8fe253f19b1c61c38111948cce00a7d260d2925a Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Fri, 19 Jan 2018 18:23:45 +0000 Subject: fix PR nitpicking --- synapse/util/threepids.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/threepids.py b/synapse/util/threepids.py index e921b97796..75efa0117b 100644 --- a/synapse/util/threepids.py +++ b/synapse/util/threepids.py @@ -27,13 +27,16 @@ def check_3pid_allowed(hs, medium, address): medium (str): 3pid medium - e.g. email, msisdn address (str): address within that medium (e.g. "wotan@matrix.org") msisdns need to first have been canonicalised + Returns: + bool: whether the 3PID medium/address is allowed to be added to this HS """ if hs.config.allowed_local_3pids: for constraint in hs.config.allowed_local_3pids: - logger.debug("Checking 3PID %s (%s) against %s (%s)" % ( - address, medium, constraint['pattern'], constraint['medium'] - )) + logger.debug( + "Checking 3PID %s (%s) against %s (%s)", + address, medium, constraint['pattern'], constraint['medium'], + ) if ( medium == constraint['medium'] and re.match(constraint['pattern'], address) -- cgit 1.5.1 From ab9f844aaf3662a64dbc4c56077e9fa37bc7d5d0 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Mon, 22 Jan 2018 19:11:18 +0100 Subject: Add federation_domain_whitelist option (#2820) Add federation_domain_whitelist gives a way to restrict which domains your HS is allowed to federate with. 
Useful mainly for gracefully preventing a private but internet-connected HS from trying to federate to the wider public Matrix network --- synapse/api/errors.py | 26 ++++++++++++++++++++++++++ synapse/config/server.py | 22 ++++++++++++++++++++++ synapse/federation/federation_client.py | 5 ++++- synapse/federation/transaction_queue.py | 4 +++- synapse/federation/transport/client.py | 3 +++ synapse/federation/transport/server.py | 9 ++++++++- synapse/handlers/device.py | 4 ++++ synapse/handlers/e2e_keys.py | 8 +++++++- synapse/handlers/federation.py | 4 ++++ synapse/http/matrixfederationclient.py | 28 +++++++++++++++++++++++++++- synapse/rest/key/v2/remote_key_resource.py | 8 ++++++++ synapse/rest/media/v1/media_repository.py | 19 +++++++++++++++++-- synapse/util/retryutils.py | 12 ++++++++++++ tests/utils.py | 1 + 14 files changed, 146 insertions(+), 7 deletions(-) (limited to 'synapse/util') diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 46b0d7b34c..aa15f73f36 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -141,6 +141,32 @@ class RegistrationError(SynapseError): pass +class FederationDeniedError(SynapseError): + """An error raised when the server tries to federate with a server which + is not on its federation whitelist. + + Attributes: + destination (str): The destination which has been denied + """ + + def __init__(self, destination): + """Raised by federation client or server to indicate that we are + deliberately not attempting to contact a given server because it is + not on our federation whitelist. + + Args: + destination (str): the domain in question + """ + + self.destination = destination + + super(FederationDeniedError, self).__init__( + code=403, + msg="Federation denied with %s." % (self.destination,), + errcode=Codes.FORBIDDEN, + ) + + class InteractiveAuthIncompleteError(Exception): """An error raised when UI auth is not yet complete diff --git a/synapse/config/server.py b/synapse/config/server.py index 436dd8a6fe..8f0b6d1f28 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -55,6 +55,17 @@ class ServerConfig(Config): "block_non_admin_invites", False, ) + # FIXME: federation_domain_whitelist needs sytests + self.federation_domain_whitelist = None + federation_domain_whitelist = config.get( + "federation_domain_whitelist", None + ) + # turn the whitelist into a hash for speed of lookup + if federation_domain_whitelist is not None: + self.federation_domain_whitelist = {} + for domain in federation_domain_whitelist: + self.federation_domain_whitelist[domain] = True + if self.public_baseurl is not None: if self.public_baseurl[-1] != '/': self.public_baseurl += '/' @@ -210,6 +221,17 @@ class ServerConfig(Config): # (except those sent by local server admins). The default is False. # block_non_admin_invites: True + # Restrict federation to the following whitelist of domains. + # N.B. we recommend also firewalling your federation listener to limit + # inbound federation traffic as early as possible, rather than relying + # purely on this application-layer restriction. If not specified, the + # default is to whitelist everything. + # + # federation_domain_whitelist: + # - lon.example.com + # - nyc.example.com + # - syd.example.com + # List of ports that Synapse should listen on, their purpose and their # configuration. 
listeners: diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index b1fe03f702..813907f7f2 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -23,7 +23,7 @@ from twisted.internet import defer from synapse.api.constants import Membership from synapse.api.errors import ( - CodeMessageException, HttpResponseException, SynapseError, + CodeMessageException, HttpResponseException, SynapseError, FederationDeniedError ) from synapse.events import builder from synapse.federation.federation_base import ( @@ -266,6 +266,9 @@ class FederationClient(FederationBase): except NotRetryingDestination as e: logger.info(e.message) continue + except FederationDeniedError as e: + logger.info(e.message) + continue except Exception as e: pdu_attempts[destination] = now diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 9d39f46583..a141ec9953 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -19,7 +19,7 @@ from twisted.internet import defer from .persistence import TransactionActions from .units import Transaction, Edu -from synapse.api.errors import HttpResponseException +from synapse.api.errors import HttpResponseException, FederationDeniedError from synapse.util import logcontext, PreserveLoggingContext from synapse.util.async import run_on_reactor from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter @@ -490,6 +490,8 @@ class TransactionQueue(object): (e.retry_last_ts + e.retry_interval) / 1000.0 ), ) + except FederationDeniedError as e: + logger.info(e) except Exception as e: logger.warn( "TX [%s] Failed to send transaction: %s", diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 1f3ce238f6..5488e82985 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -212,6 +212,9 @@ class TransportLayerClient(object): Fails with ``NotRetryingDestination`` if we are not yet ready to retry this server. 
+ + Fails with ``FederationDeniedError`` if the remote destination + is not in our federation whitelist """ valid_memberships = {Membership.JOIN, Membership.LEAVE} if membership not in valid_memberships: diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 2b02b021ec..06c16ba4fa 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -16,7 +16,7 @@ from twisted.internet import defer from synapse.api.urls import FEDERATION_PREFIX as PREFIX -from synapse.api.errors import Codes, SynapseError +from synapse.api.errors import Codes, SynapseError, FederationDeniedError from synapse.http.server import JsonResource from synapse.http.servlet import ( parse_json_object_from_request, parse_integer_from_args, parse_string_from_args, @@ -81,6 +81,7 @@ class Authenticator(object): self.keyring = hs.get_keyring() self.server_name = hs.hostname self.store = hs.get_datastore() + self.federation_domain_whitelist = hs.config.federation_domain_whitelist # A method just so we can pass 'self' as the authenticator to the Servlets @defer.inlineCallbacks @@ -92,6 +93,12 @@ class Authenticator(object): "signatures": {}, } + if ( + self.federation_domain_whitelist is not None and + self.server_name not in self.federation_domain_whitelist + ): + raise FederationDeniedError(self.server_name) + if content is not None: json_request["content"] = content diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 2152efc692..0e83453851 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -14,6 +14,7 @@ # limitations under the License. from synapse.api import errors from synapse.api.constants import EventTypes +from synapse.api.errors import FederationDeniedError from synapse.util import stringutils from synapse.util.async import Linearizer from synapse.util.caches.expiringcache import ExpiringCache @@ -513,6 +514,9 @@ class DeviceListEduUpdater(object): # This makes it more likely that the device lists will # eventually become consistent. 
return + except FederationDeniedError as e: + logger.info(e) + return except Exception: # TODO: Remember that we are now out of sync and try again # later diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 5af8abf66b..9aa95f89e6 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -19,7 +19,9 @@ import logging from canonicaljson import encode_canonical_json from twisted.internet import defer -from synapse.api.errors import SynapseError, CodeMessageException +from synapse.api.errors import ( + SynapseError, CodeMessageException, FederationDeniedError, +) from synapse.types import get_domain_from_id, UserID from synapse.util.logcontext import preserve_fn, make_deferred_yieldable from synapse.util.retryutils import NotRetryingDestination @@ -140,6 +142,10 @@ class E2eKeysHandler(object): failures[destination] = { "status": 503, "message": "Not ready for retry", } + except FederationDeniedError as e: + failures[destination] = { + "status": 403, "message": "Federation Denied", + } except Exception as e: # include ConnectionRefused and other errors failures[destination] = { diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index ac70730885..677532c87b 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -22,6 +22,7 @@ from ._base import BaseHandler from synapse.api.errors import ( AuthError, FederationError, StoreError, CodeMessageException, SynapseError, + FederationDeniedError, ) from synapse.api.constants import EventTypes, Membership, RejectedReason from synapse.events.validator import EventValidator @@ -782,6 +783,9 @@ class FederationHandler(BaseHandler): except NotRetryingDestination as e: logger.info(e.message) continue + except FederationDeniedError as e: + logger.info(e) + continue except Exception as e: logger.exception( "Failed to backfill from %s because %s", diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 833496b72d..9145405cb0 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -27,7 +27,7 @@ import synapse.metrics from canonicaljson import encode_canonical_json from synapse.api.errors import ( - SynapseError, Codes, HttpResponseException, + SynapseError, Codes, HttpResponseException, FederationDeniedError, ) from signedjson.sign import sign_json @@ -123,11 +123,22 @@ class MatrixFederationHttpClient(object): Fails with ``HTTPRequestException``: if we get an HTTP response code >= 300. + Fails with ``NotRetryingDestination`` if we are not yet ready to retry this server. + + Fails with ``FederationDeniedError`` if this destination + is not on our federation whitelist + (May also fail with plenty of other Exceptions for things like DNS failures, connection failures, SSL failures.) """ + if ( + self.hs.config.federation_domain_whitelist and + destination not in self.hs.config.federation_domain_whitelist + ): + raise FederationDeniedError(destination) + limiter = yield synapse.util.retryutils.get_retry_limiter( destination, self.clock, @@ -308,6 +319,9 @@ class MatrixFederationHttpClient(object): Fails with ``NotRetryingDestination`` if we are not yet ready to retry this server. + + Fails with ``FederationDeniedError`` if this destination + is not on our federation whitelist """ if not json_data_callback: @@ -368,6 +382,9 @@ class MatrixFederationHttpClient(object): Fails with ``NotRetryingDestination`` if we are not yet ready to retry this server. 
+ + Fails with ``FederationDeniedError`` if this destination + is not on our federation whitelist """ def body_callback(method, url_bytes, headers_dict): @@ -422,6 +439,9 @@ class MatrixFederationHttpClient(object): Fails with ``NotRetryingDestination`` if we are not yet ready to retry this server. + + Fails with ``FederationDeniedError`` if this destination + is not on our federation whitelist """ logger.debug("get_json args: %s", args) @@ -475,6 +495,9 @@ class MatrixFederationHttpClient(object): Fails with ``NotRetryingDestination`` if we are not yet ready to retry this server. + + Fails with ``FederationDeniedError`` if this destination + is not on our federation whitelist """ response = yield self._request( @@ -518,6 +541,9 @@ class MatrixFederationHttpClient(object): Fails with ``NotRetryingDestination`` if we are not yet ready to retry this server. + + Fails with ``FederationDeniedError`` if this destination + is not on our federation whitelist """ encoded_args = {} diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index cc2842aa72..17e6079cba 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -93,6 +93,7 @@ class RemoteKey(Resource): self.store = hs.get_datastore() self.version_string = hs.version_string self.clock = hs.get_clock() + self.federation_domain_whitelist = hs.config.federation_domain_whitelist def render_GET(self, request): self.async_render_GET(request) @@ -137,6 +138,13 @@ class RemoteKey(Resource): logger.info("Handling query for keys %r", query) store_queries = [] for server_name, key_ids in query.items(): + if ( + self.federation_domain_whitelist is not None and + server_name not in self.federation_domain_whitelist + ): + logger.debug("Federation denied with %s", server_name) + continue + if not key_ids: key_ids = (None,) for key_id in key_ids: diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 4f56bcf577..485db8577a 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -32,8 +32,9 @@ from .media_storage import MediaStorage from synapse.http.matrixfederationclient import MatrixFederationHttpClient from synapse.util.stringutils import random_string -from synapse.api.errors import SynapseError, HttpResponseException, \ - NotFoundError +from synapse.api.errors import ( + SynapseError, HttpResponseException, NotFoundError, FederationDeniedError, +) from synapse.util.async import Linearizer from synapse.util.stringutils import is_ascii @@ -75,6 +76,8 @@ class MediaRepository(object): self.recently_accessed_remotes = set() self.recently_accessed_locals = set() + self.federation_domain_whitelist = hs.config.federation_domain_whitelist + # List of StorageProviders where we should search for media and # potentially upload to. 
storage_providers = [] @@ -216,6 +219,12 @@ class MediaRepository(object): Deferred: Resolves once a response has successfully been written to request """ + if ( + self.federation_domain_whitelist is not None and + server_name not in self.federation_domain_whitelist + ): + raise FederationDeniedError(server_name) + self.mark_recently_accessed(server_name, media_id) # We linearize here to ensure that we don't try and download remote @@ -250,6 +259,12 @@ class MediaRepository(object): Returns: Deferred[dict]: The media_info of the file """ + if ( + self.federation_domain_whitelist is not None and + server_name not in self.federation_domain_whitelist + ): + raise FederationDeniedError(server_name) + # We linearize here to ensure that we don't try and download remote # media multiple times concurrently key = (server_name, media_id) diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 1adedbb361..47b0bb5eb3 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -26,6 +26,18 @@ logger = logging.getLogger(__name__) class NotRetryingDestination(Exception): def __init__(self, retry_last_ts, retry_interval, destination): + """Raised by the limiter (and federation client) to indicate that we are + deliberately not attempting to contact a given server. + + Args: + retry_last_ts (int): the unix ts in milliseconds of our last attempt + to contact the server. 0 indicates that the last attempt was + successful or that we've never actually attempted to connect. + retry_interval (int): the time in milliseconds to wait until the next + attempt. + destination (str): the domain in question + """ + + msg = "Not retrying server %s." % (destination,) super(NotRetryingDestination, self).__init__(msg) diff --git a/tests/utils.py b/tests/utils.py index 44e5f75093..3116047892 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -57,6 +57,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs): config.worker_app = None config.email_enable_notifs = False config.block_non_admin_invites = False + config.federation_domain_whitelist = None # disable user directory updates, because they get done in the # background, which upsets the test runner. 
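The pattern is identical at every federation touchpoint in this change: the federation client, the transaction queue, the device-list and e2e-key handlers, the key and media fetchers, and the server-side authenticator all consult the whitelist before contacting (or accepting traffic from) a remote domain, and all treat the resulting FederationDeniedError as a deliberate refusal rather than a transient failure. A minimal sketch of the shared check, pulled out into a helper purely for illustration (the commit itself inlines the test at each call site, and the helper name here is made up):

    from synapse.api.errors import FederationDeniedError

    def assert_destination_allowed(federation_domain_whitelist, destination):
        # A whitelist of None means "unrestricted": every domain is allowed.
        # Otherwise the destination must appear in the dict that
        # ServerConfig builds from the federation_domain_whitelist option.
        if (
            federation_domain_whitelist is not None and
            destination not in federation_domain_whitelist
        ):
            raise FederationDeniedError(destination)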
-- cgit 1.5.1 From bc496df192fa20dee933590d5f21a3425388c0d7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 1 Feb 2018 17:57:51 +0000 Subject: report metrics on number of cache evictions --- synapse/metrics/metric.py | 11 ++++++++++- synapse/util/caches/descriptors.py | 4 ++++ synapse/util/caches/expiringcache.py | 6 +++++- synapse/util/caches/lrucache.py | 28 +++++++++++++++++++++++++--- tests/metrics/test_metric.py | 12 ++++++++++++ 5 files changed, 56 insertions(+), 5 deletions(-) (limited to 'synapse/util') diff --git a/synapse/metrics/metric.py b/synapse/metrics/metric.py index 1e783e5ff4..ff5aa8c0e1 100644 --- a/synapse/metrics/metric.py +++ b/synapse/metrics/metric.py @@ -193,7 +193,9 @@ class DistributionMetric(object): class CacheMetric(object): - __slots__ = ("name", "cache_name", "hits", "misses", "size_callback") + __slots__ = ( + "name", "cache_name", "hits", "misses", "evicted_size", "size_callback", + ) def __init__(self, name, size_callback, cache_name): self.name = name @@ -201,6 +203,7 @@ class CacheMetric(object): self.hits = 0 self.misses = 0 + self.evicted_size = 0 self.size_callback = size_callback @@ -210,6 +213,9 @@ class CacheMetric(object): def inc_misses(self): self.misses += 1 + def inc_evictions(self, size=1): + self.evicted_size += size + def render(self): size = self.size_callback() hits = self.hits @@ -219,6 +225,9 @@ class CacheMetric(object): """%s:hits{name="%s"} %d""" % (self.name, self.cache_name, hits), """%s:total{name="%s"} %d""" % (self.name, self.cache_name, total), """%s:size{name="%s"} %d""" % (self.name, self.cache_name, size), + """%s:evicted_size{name="%s"} %d""" % ( + self.name, self.cache_name, self.evicted_size + ), ] diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index af65bfe7b8..bf3a66eae4 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -75,6 +75,7 @@ class Cache(object): self.cache = LruCache( max_size=max_entries, keylen=keylen, cache_type=cache_type, size_callback=(lambda d: len(d)) if iterable else None, + evicted_callback=self._on_evicted, ) self.name = name @@ -83,6 +84,9 @@ class Cache(object): self.thread = None self.metrics = register_cache(name, self.cache) + def _on_evicted(self, evicted_count): + self.metrics.inc_evictions(evicted_count) + def check_thread(self): expected_thread = self.thread if expected_thread is None: diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 6ad53a6390..0aa103eecb 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -79,7 +79,11 @@ class ExpiringCache(object): while self._max_len and len(self) > self._max_len: _key, value = self._cache.popitem(last=False) if self.iterable: - self._size_estimate -= len(value.value) + removed_len = len(value.value) + self.metrics.inc_evictions(removed_len) + self._size_estimate -= removed_len + else: + self.metrics.inc_evictions() def __getitem__(self, key): try: diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index cf5fbb679c..f088dd430e 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -49,7 +49,24 @@ class LruCache(object): Can also set callbacks on objects when getting/setting which are fired when that key gets invalidated/evicted. 
""" - def __init__(self, max_size, keylen=1, cache_type=dict, size_callback=None): + def __init__(self, max_size, keylen=1, cache_type=dict, size_callback=None, + evicted_callback=None): + """ + Args: + max_size (int): + + keylen (int): + + cache_type (type): + type of underlying cache to be used. Typically one of dict + or TreeCache. + + size_callback (func(V) -> int | None): + + evicted_callback (func(int)|None): + if not None, called on eviction with the size of the evicted + entry + """ cache = cache_type() self.cache = cache # Used for introspection. list_root = _Node(None, None, None, None) @@ -61,8 +78,10 @@ class LruCache(object): def evict(): while cache_len() > max_size: todelete = list_root.prev_node - delete_node(todelete) + evicted_len = delete_node(todelete) cache.pop(todelete.key, None) + if evicted_callback: + evicted_callback(evicted_len) def synchronized(f): @wraps(f) @@ -111,12 +130,15 @@ class LruCache(object): prev_node.next_node = next_node next_node.prev_node = prev_node + deleted_len = 1 if size_callback: - cached_cache_len[0] -= size_callback(node.value) + deleted_len = size_callback(node.value) + cached_cache_len[0] -= deleted_len for cb in node.callbacks: cb() node.callbacks.clear() + return deleted_len @synchronized def cache_get(key, default=None, callbacks=[]): diff --git a/tests/metrics/test_metric.py b/tests/metrics/test_metric.py index f85455a5af..39bde6e3f8 100644 --- a/tests/metrics/test_metric.py +++ b/tests/metrics/test_metric.py @@ -141,6 +141,7 @@ class CacheMetricTestCase(unittest.TestCase): 'cache:hits{name="cache_name"} 0', 'cache:total{name="cache_name"} 0', 'cache:size{name="cache_name"} 0', + 'cache:evicted_size{name="cache_name"} 0', ]) metric.inc_misses() @@ -150,6 +151,7 @@ class CacheMetricTestCase(unittest.TestCase): 'cache:hits{name="cache_name"} 0', 'cache:total{name="cache_name"} 1', 'cache:size{name="cache_name"} 1', + 'cache:evicted_size{name="cache_name"} 0', ]) metric.inc_hits() @@ -158,4 +160,14 @@ class CacheMetricTestCase(unittest.TestCase): 'cache:hits{name="cache_name"} 1', 'cache:total{name="cache_name"} 2', 'cache:size{name="cache_name"} 1', + 'cache:evicted_size{name="cache_name"} 0', + ]) + + metric.inc_evictions(2) + + self.assertEquals(metric.render(), [ + 'cache:hits{name="cache_name"} 1', + 'cache:total{name="cache_name"} 2', + 'cache:size{name="cache_name"} 1', + 'cache:evicted_size{name="cache_name"} 2', ]) -- cgit 1.5.1 From 3a75de923b9183c073bbddae1e08fae546a11f7a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 1 Mar 2018 12:19:09 +0000 Subject: Rewrite make_deferred_yieldable avoiding inlineCallbacks ... because (a) it's actually simpler (b) it might be marginally more performant? --- synapse/util/logcontext.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 94fa7cac98..a8dea15c1b 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -299,10 +299,6 @@ def preserve_fn(f): Useful for wrapping functions that return a deferred which you don't yield on. """ - def reset_context(result): - LoggingContext.set_current_context(LoggingContext.sentinel) - return result - def g(*args, **kwargs): current = LoggingContext.current_context() res = f(*args, **kwargs) @@ -323,12 +319,11 @@ def preserve_fn(f): # which is supposed to have a single entry and exit point. But # by spawning off another deferred, we are effectively # adding a new exit point.) 
- res.addBoth(reset_context) + res.addBoth(_set_context_cb, LoggingContext.sentinel) return res return g -def make_deferred_yieldable(deferred): """Given a deferred, make it follow the Synapse logcontext rules: @@ -342,9 +337,16 @@ def make_deferred_yieldable(deferred): (This is more-or-less the opposite operation to preserve_fn.) """ - with PreserveLoggingContext(): - r = yield deferred - defer.returnValue(r) + if isinstance(deferred, defer.Deferred) and not deferred.called: + prev_context = LoggingContext.set_current_context(LoggingContext.sentinel) + deferred.addBoth(_set_context_cb, prev_context) + return deferred + + +def _set_context_cb(result, context): + """A callback function which just sets the logging context""" + LoggingContext.set_current_context(context) + return result # modules to ignore in `logcontext_tracer` -- cgit 1.5.1 From 20f40348d4ea55cc5b98528673e26bac7396a3cb Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 7 Mar 2018 19:59:24 +0000 Subject: Factor run_in_background out from preserve_fn It annoys me that we create temporary function objects when there's really no need for it. Let's factor the gubbins out of preserve_fn and start using it. --- docs/log_contexts.rst | 8 +++---- synapse/util/logcontext.py | 53 +++++++++++++++++++++++++--------------------- 2 files changed, 33 insertions(+), 28 deletions(-) (limited to 'synapse/util') diff --git a/docs/log_contexts.rst b/docs/log_contexts.rst index b19b7fa1ea..82ac4f91e5 100644 --- a/docs/log_contexts.rst +++ b/docs/log_contexts.rst @@ -279,9 +279,9 @@ Obviously that option means that the operations done in that might be fixed by setting a different logcontext via a ``with LoggingContext(...)`` in ``background_operation``). -The second option is to use ``logcontext.preserve_fn``, which wraps a function -so that it doesn't reset the logcontext even when it returns an incomplete -deferred, and adds a callback to the returned deferred to reset the +The second option is to use ``logcontext.run_in_background``, which wraps a +function so that it doesn't reset the logcontext even when it returns an +incomplete deferred, and adds a callback to the returned deferred to reset the logcontext. In other words, it turns a function that follows the Synapse rules about logcontexts and Deferreds into one which behaves more like an external function — the opposite operation to that described in the previous section. @@ -293,7 +293,7 @@ It can be used like this: def do_request_handling(): yield foreground_operation() - logcontext.preserve_fn(background_operation)() + logcontext.run_in_background(background_operation) # this will now be logged against the request context logger.debug("Request handling complete") diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index a8dea15c1b..d660ec785b 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -292,36 +292,41 @@ class PreserveLoggingContext(object): def preserve_fn(f): - """Wraps a function, to ensure that the current context is restored after + """Function decorator which wraps the function with run_in_background""" + def g(*args, **kwargs): + return run_in_background(f, *args, **kwargs) + return g + + +def run_in_background(f, *args, **kwargs): + """Calls a function, ensuring that the current context is restored after return from the function, and that the sentinel context is set once the deferred returned by the function completes. Useful for wrapping functions that return a deferred which you don't yield on. 
""" - def g(*args, **kwargs): - current = LoggingContext.current_context() - res = f(*args, **kwargs) - if isinstance(res, defer.Deferred) and not res.called: - # The function will have reset the context before returning, so - # we need to restore it now. - LoggingContext.set_current_context(current) - - # The original context will be restored when the deferred - # completes, but there is nothing waiting for it, so it will - # get leaked into the reactor or some other function which - # wasn't expecting it. We therefore need to reset the context - # here. - # - # (If this feels asymmetric, consider it this way: we are - # effectively forking a new thread of execution. We are - # probably currently within a ``with LoggingContext()`` block, - # which is supposed to have a single entry and exit point. But - # by spawning off another deferred, we are effectively - # adding a new exit point.) - res.addBoth(_set_context_cb, LoggingContext.sentinel) - return res - return g + current = LoggingContext.current_context() + res = f(*args, **kwargs) + if isinstance(res, defer.Deferred) and not res.called: + # The function will have reset the context before returning, so + # we need to restore it now. + LoggingContext.set_current_context(current) + + # The original context will be restored when the deferred + # completes, but there is nothing waiting for it, so it will + # get leaked into the reactor or some other function which + # wasn't expecting it. We therefore need to reset the context + # here. + # + # (If this feels asymmetric, consider it this way: we are + # effectively forking a new thread of execution. We are + # probably currently within a ``with LoggingContext()`` block, + # which is supposed to have a single entry and exit point. But + # by spawning off another deferred, we are effectively + # adding a new exit point.) + res.addBoth(_set_context_cb, LoggingContext.sentinel) + return res def make_deferred_yieldable(deferred): -- cgit 1.5.1 From 7c7706f42b56dd61f5eb17679aa12247f7058ed5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 15 Mar 2018 15:40:13 +0000 Subject: Fix bug where state cache used lots of memory The state cache bases its size on the sum of the size of entries. The size of the entry is calculated once on insertion, so it is important that the size of entries does not change. The DictionaryCache modified the entries size, which caused the state cache to incorrectly think it was smaller than it actually was. 
--- synapse/util/caches/dictionary_cache.py | 6 +++++- synapse/util/caches/lrucache.py | 8 ++++---- 2 files changed, 9 insertions(+), 5 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py index d4105822b3..1709e8b429 100644 --- a/synapse/util/caches/dictionary_cache.py +++ b/synapse/util/caches/dictionary_cache.py @@ -132,9 +132,13 @@ class DictionaryCache(object): self._update_or_insert(key, value, known_absent) def _update_or_insert(self, key, value, known_absent): - entry = self.cache.setdefault(key, DictionaryEntry(False, set(), {})) + # We pop and reinsert as we need to tell the cache the size may have + # changed + + entry = self.cache.pop(key, DictionaryEntry(False, set(), {})) entry.value.update(value) entry.known_absent.update(known_absent) + self.cache[key] = entry def _insert(self, key, value, known_absent): self.cache[key] = DictionaryEntry(True, known_absent, value) diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index f088dd430e..a4bf8fa6ae 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -154,14 +154,14 @@ class LruCache(object): def cache_set(key, value, callbacks=[]): node = cache.get(key, None) if node is not None: - if value != node.value: + if node.callbacks and value != node.value: for cb in node.callbacks: cb() node.callbacks.clear() - if size_callback: - cached_cache_len[0] -= size_callback(node.value) - cached_cache_len[0] += size_callback(value) + if size_callback: + cached_cache_len[0] -= size_callback(node.value) + cached_cache_len[0] += size_callback(value) node.callbacks.update(callbacks) -- cgit 1.5.1 From 5a6e54264d3a9f8b9b1f2e99db94e31ea3c21d24 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 9 Mar 2018 13:56:26 +0000 Subject: Make 'unexpected logging context' into warnings I think we've now fixed enough of these that the rest can be logged at warning. --- synapse/util/logcontext.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index d660ec785b..fdd4a93d07 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -163,7 +163,7 @@ class LoggingContext(object): current = self.set_current_context(self.previous_context) if current is not self: if current is self.sentinel: - logger.debug("Expected logging context %s has been lost", self) + logger.warn("Expected logging context %s has been lost", self) else: logger.warn( "Current logging context %s is not expected context %s", @@ -278,7 +278,7 @@ class PreserveLoggingContext(object): context = LoggingContext.set_current_context(self.current_context) if context != self.new_context: - logger.debug( + logger.warn( "Unexpected logging context: %s is not %s", context, self.new_context, ) -- cgit 1.5.1 From 9a0d783c113ae74c55e409d33219cd77f3662b9f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 19 Mar 2018 11:35:53 +0000 Subject: Add comments --- synapse/util/caches/lrucache.py | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'synapse/util') diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index a4bf8fa6ae..1c5a982094 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -154,11 +154,18 @@ class LruCache(object): def cache_set(key, value, callbacks=[]): node = cache.get(key, None) if node is not None: + # We sometimes store large objects, e.g. 
dicts, which cause + # the inequality check to take a long time. So let's only do + # the check if we have some callbacks to call. if node.callbacks and value != node.value: for cb in node.callbacks: cb() node.callbacks.clear() + # We don't bother to protect this by value != node.value as + # generally size_callback will be cheap compared with equality + # checks. (For example, taking the size of two dicts is quicker + # than comparing them for equality.) if size_callback: cached_cache_len[0] -= size_callback(node.value) cached_cache_len[0] += size_callback(value) -- cgit 1.5.1 From 8cbbfaefc1fc597cbbef52a10dbfb8ecd4d8a8cd Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Fri, 23 Mar 2018 10:32:50 +0000 Subject: 404 correctly on missing paths via NoResource fixes https://github.com/matrix-org/synapse/issues/2043 and https://github.com/matrix-org/synapse/issues/2029 --- synapse/app/appservice.py | 4 ++-- synapse/app/client_reader.py | 4 ++-- synapse/app/event_creator.py | 4 ++-- synapse/app/federation_reader.py | 4 ++-- synapse/app/federation_sender.py | 4 ++-- synapse/app/frontend_proxy.py | 4 ++-- synapse/app/homeserver.py | 4 ++-- synapse/app/media_repository.py | 4 ++-- synapse/app/pusher.py | 4 ++-- synapse/app/synchrotron.py | 4 ++-- synapse/app/user_dir.py | 4 ++-- synapse/util/httpresourcetree.py | 4 ++-- 12 files changed, 24 insertions(+), 24 deletions(-) (limited to 'synapse/util') diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index c6fe4516d1..f2540023a7 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -36,7 +36,7 @@ from synapse.util.logcontext import LoggingContext, preserve_fn from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string from twisted.internet import reactor -from twisted.web.resource import Resource +from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.appservice") @@ -64,7 +64,7 @@ class AppserviceServer(HomeServer): if name == "metrics": resources[METRICS_PREFIX] = MetricsResource(self) - root_resource = create_resource_tree(resources, Resource()) + root_resource = create_resource_tree(resources, NoResource()) _base.listen_tcp( bind_addresses, diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 0a8ce9bc66..267d34c881 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -44,7 +44,7 @@ from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string from twisted.internet import reactor -from twisted.web.resource import Resource +from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.client_reader") @@ -88,7 +88,7 @@ class ClientReaderServer(HomeServer): "/_matrix/client/api/v1": resource, }) - root_resource = create_resource_tree(resources, Resource()) + root_resource = create_resource_tree(resources, NoResource()) _base.listen_tcp( bind_addresses, diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index 172e989b54..b915d12d53 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -52,7 +52,7 @@ from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string from twisted.internet import reactor -from twisted.web.resource import Resource +from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.event_creator") @@ -104,7 
+104,7 @@ class EventCreatorServer(HomeServer): "/_matrix/client/api/v1": resource, }) - root_resource = create_resource_tree(resources, Resource()) + root_resource = create_resource_tree(resources, NoResource()) _base.listen_tcp( bind_addresses, diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 20d157911b..c1dc66dd17 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -41,7 +41,7 @@ from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string from twisted.internet import reactor -from twisted.web.resource import Resource +from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.federation_reader") @@ -77,7 +77,7 @@ class FederationReaderServer(HomeServer): FEDERATION_PREFIX: TransportLayerServer(self), }) - root_resource = create_resource_tree(resources, Resource()) + root_resource = create_resource_tree(resources, NoResource()) _base.listen_tcp( bind_addresses, diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index f760826d27..0cc3331519 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -42,7 +42,7 @@ from synapse.util.logcontext import LoggingContext, preserve_fn from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string from twisted.internet import defer, reactor -from twisted.web.resource import Resource +from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.federation_sender") @@ -91,7 +91,7 @@ class FederationSenderServer(HomeServer): if name == "metrics": resources[METRICS_PREFIX] = MetricsResource(self) - root_resource = create_resource_tree(resources, Resource()) + root_resource = create_resource_tree(resources, NoResource()) _base.listen_tcp( bind_addresses, diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 816c080d18..de889357c3 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -44,7 +44,7 @@ from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string from twisted.internet import defer, reactor -from twisted.web.resource import Resource +from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.frontend_proxy") @@ -142,7 +142,7 @@ class FrontendProxyServer(HomeServer): "/_matrix/client/api/v1": resource, }) - root_resource = create_resource_tree(resources, Resource()) + root_resource = create_resource_tree(resources, NoResource()) _base.listen_tcp( bind_addresses, diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index e477c7ced6..c00afbba28 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -56,7 +56,7 @@ from synapse.util.rlimit import change_resource_limit from synapse.util.versionstring import get_version_string from twisted.application import service from twisted.internet import defer, reactor -from twisted.web.resource import EncodingResourceWrapper, Resource +from twisted.web.resource import EncodingResourceWrapper, NoResource from twisted.web.server import GzipEncoderFactory from twisted.web.static import File @@ -126,7 +126,7 @@ class SynapseHomeServer(HomeServer): if WEB_CLIENT_PREFIX in resources: root_resource = RootRedirect(WEB_CLIENT_PREFIX) else: - root_resource = Resource() + root_resource = NoResource() root_resource = 
create_resource_tree(resources, root_resource) diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index 84c5791b3b..fc8282bbc1 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -43,7 +43,7 @@ from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string from twisted.internet import reactor -from twisted.web.resource import Resource +from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.media_repository") @@ -84,7 +84,7 @@ class MediaRepositoryServer(HomeServer): ), }) - root_resource = create_resource_tree(resources, Resource()) + root_resource = create_resource_tree(resources, NoResource()) _base.listen_tcp( bind_addresses, diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 98a4a7c62c..d5c3a85195 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -37,7 +37,7 @@ from synapse.util.logcontext import LoggingContext, preserve_fn from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string from twisted.internet import defer, reactor -from twisted.web.resource import Resource +from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.pusher") @@ -94,7 +94,7 @@ class PusherServer(HomeServer): if name == "metrics": resources[METRICS_PREFIX] = MetricsResource(self) - root_resource = create_resource_tree(resources, Resource()) + root_resource = create_resource_tree(resources, NoResource()) _base.listen_tcp( bind_addresses, diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index abe91dcfbd..508b66613d 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -56,7 +56,7 @@ from synapse.util.manhole import manhole from synapse.util.stringutils import random_string from synapse.util.versionstring import get_version_string from twisted.internet import defer, reactor -from twisted.web.resource import Resource +from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.synchrotron") @@ -269,7 +269,7 @@ class SynchrotronServer(HomeServer): "/_matrix/client/api/v1": resource, }) - root_resource = create_resource_tree(resources, Resource()) + root_resource = create_resource_tree(resources, NoResource()) _base.listen_tcp( bind_addresses, diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index 494ccb702c..5f845e80d1 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -43,7 +43,7 @@ from synapse.util.logcontext import LoggingContext, preserve_fn from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string from twisted.internet import reactor -from twisted.web.resource import Resource +from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.user_dir") @@ -116,7 +116,7 @@ class UserDirectoryServer(HomeServer): "/_matrix/client/api/v1": resource, }) - root_resource = create_resource_tree(resources, Resource()) + root_resource = create_resource_tree(resources, NoResource()) _base.listen_tcp( bind_addresses, diff --git a/synapse/util/httpresourcetree.py b/synapse/util/httpresourcetree.py index 45be47159a..d747849553 100644 --- a/synapse/util/httpresourcetree.py +++ b/synapse/util/httpresourcetree.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from twisted.web.resource import Resource +from twisted.web.resource import NoResource import logging @@ -45,7 +45,7 @@ def create_resource_tree(desired_tree, root_resource): for path_seg in full_path.split('/')[1:-1]: if path_seg not in last_resource.listNames(): # resource doesn't exist, so make a "dummy resource" - child_resource = Resource() + child_resource = NoResource() last_resource.putChild(path_seg, child_resource) res_id = _resource_id(last_resource, path_seg) resource_mappings[res_id] = child_resource -- cgit 1.5.1 From 05630758f25d958bf60fde4df5f80a89e4a9a0ac Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 29 Mar 2018 22:57:28 +0100 Subject: Use static JSONEncoders using json.dumps with custom options requires us to create a new JSONEncoder on each call. It's more efficient to create one upfront and reuse it. --- synapse/handlers/message.py | 4 ++-- synapse/replication/tcp/commands.py | 8 +++++--- synapse/storage/events.py | 23 ++++++++--------------- synapse/util/frozenutils.py | 19 +++++++++++++++++++ 4 files changed, 34 insertions(+), 20 deletions(-) (limited to 'synapse/util') diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 5a8ddc253e..6de6e13b7b 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -27,7 +27,7 @@ from synapse.types import ( from synapse.util.async import run_on_reactor, ReadWriteLock, Limiter from synapse.util.logcontext import preserve_fn, run_in_background from synapse.util.metrics import measure_func -from synapse.util.frozenutils import unfreeze +from synapse.util.frozenutils import frozendict_json_encoder from synapse.util.stringutils import random_string from synapse.visibility import filter_events_for_client from synapse.replication.http.send_event import send_event_to_master @@ -678,7 +678,7 @@ class EventCreationHandler(object): # Ensure that we can round trip before trying to persist in db try: - dump = simplejson.dumps(unfreeze(event.content)) + dump = frozendict_json_encoder.encode(event.content) simplejson.loads(dump) except Exception: logger.exception("Failed to encode content: %r", event.content) diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index 0005ad5879..34bcf903a3 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -24,6 +24,8 @@ import simplejson logger = logging.getLogger(__name__) +_json_encoder = simplejson.JSONEncoder(namedtuple_as_object=False) + class Command(object): """The base command class. @@ -107,7 +109,7 @@ class RdataCommand(Command): return " ".join(( self.stream_name, str(self.token) if self.token is not None else "batch", - simplejson.dumps(self.row, namedtuple_as_object=False), + _json_encoder.dumps(self.row), )) @@ -302,7 +304,7 @@ class InvalidateCacheCommand(Command): def to_line(self): return " ".join(( - self.cache_func, simplejson.dumps(self.keys, namedtuple_as_object=False) + self.cache_func, _json_encoder.encode(self.keys), )) @@ -334,7 +336,7 @@ class UserIpCommand(Command): ) def to_line(self): - return self.user_id + " " + simplejson.dumps(( + return self.user_id + " " + _json_encoder.encode(( self.access_token, self.ip, self.user_agent, self.device_id, self.last_seen, )) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index f3d65f4338..ece5e6c41f 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -14,15 +14,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.storage.events_worker import EventsWorkerStore +from collections import OrderedDict, deque, namedtuple +from functools import wraps +import logging +import simplejson as json from twisted.internet import defer -from synapse.events import USE_FROZEN_DICTS +from synapse.storage.events_worker import EventsWorkerStore from synapse.util.async import ObservableDeferred +from synapse.util.frozenutils import frozendict_json_encoder from synapse.util.logcontext import ( - PreserveLoggingContext, make_deferred_yieldable + PreserveLoggingContext, make_deferred_yieldable, ) from synapse.util.logutils import log_function from synapse.util.metrics import Measure @@ -30,16 +34,8 @@ from synapse.api.constants import EventTypes from synapse.api.errors import SynapseError from synapse.util.caches.descriptors import cached, cachedInlineCallbacks from synapse.types import get_domain_from_id - -from canonicaljson import encode_canonical_json -from collections import deque, namedtuple, OrderedDict -from functools import wraps - import synapse.metrics -import logging -import simplejson as json - # these are only included to make the type annotations work from synapse.events import EventBase # noqa: F401 from synapse.events.snapshot import EventContext # noqa: F401 @@ -71,10 +67,7 @@ state_delta_reuse_delta_counter = metrics.register_counter( def encode_json(json_object): - if USE_FROZEN_DICTS: - return encode_canonical_json(json_object) - else: - return json.dumps(json_object, ensure_ascii=False) + return frozendict_json_encoder.encode(json_object) class _EventPeristenceQueue(object): diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py index 6322f0f55c..f497b51f4a 100644 --- a/synapse/util/frozenutils.py +++ b/synapse/util/frozenutils.py @@ -14,6 +14,7 @@ # limitations under the License. from frozendict import frozendict +import simplejson as json def freeze(o): @@ -49,3 +50,21 @@ def unfreeze(o): pass return o + + +def _handle_frozendict(obj): + """Helper for EventEncoder. Makes frozendicts serializable by returning + the underlying dict + """ + if type(obj) is frozendict: + # fishing the protected dict out of the object is a bit nasty, + # but we don't really want the overhead of copying the dict. + return obj._dict + raise TypeError('Object of type %s is not JSON serializable' % + obj.__class__.__name__) + + +# A JSONEncoder which is capable of encoding frozendics without barfing +frozendict_json_encoder = json.JSONEncoder( + default=_handle_frozendict, +) -- cgit 1.5.1 From a9a74101a4925bd208db682952b5dadf4b157a8d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 4 Apr 2018 08:58:53 +0100 Subject: Document the behaviour of ResponseCache it looks like everything that uses ResponseCache expects to have to `make_deferred_yieldable` its results. It's debatable whether that is the best approach, but let's document it for now to avoid further confusion. --- synapse/util/caches/response_cache.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) (limited to 'synapse/util') diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index 00af539880..4ecd91deb5 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -31,6 +31,18 @@ class ResponseCache(object): self.timeout_sec = timeout_ms / 1000. def get(self, key): + """Look up the given key. + + Returns a deferred which doesn't follow the synapse logcontext rules, + so you'll probably want to make_deferred_yieldable it. 
+ + Args: + key (str): + + Returns: + twisted.internet.defer.Deferred|None: None if there is no entry + for this key; otherwise a deferred result. + """ result = self.pending_result_cache.get(key) if result is not None: return result.observe() @@ -38,6 +50,26 @@ class ResponseCache(object): return None def set(self, key, deferred): + """Set the entry for the given key to the given deferred. + + *deferred* should run its callbacks in the sentinel logcontext (ie, + you should wrap normal synapse deferreds with + logcontext.run_in_background). + + Returns a new Deferred which also doesn't follow the synapse logcontext + rules, so you will want to make_deferred_yieldable it + + (TODO: before using this more widely, it might make sense to refactor + it and get() so that they do the necessary wrapping rather than having + to do it everywhere ResponseCache is used.) + + Args: + key (str): + deferred (twisted.internet.defer.Deferred): + + Returns: + twisted.internet.defer.Deferred + """ result = ObservableDeferred(deferred, consumeErrors=True) self.pending_result_cache[key] = result -- cgit 1.5.1 From 518f6de0881378b1fa356e21256436491d43c93c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 4 Apr 2018 19:46:28 +0100 Subject: Remove redundant metrics which were deprecated in 0.27.0. --- CHANGES.rst | 9 +++++++++ UPGRADE.rst | 9 ++++++++- docs/metrics-howto.rst | 11 +++++++++++ synapse/http/server.py | 26 -------------------------- synapse/util/metrics.py | 25 ------------------------- 5 files changed, 28 insertions(+), 52 deletions(-) (limited to 'synapse/util') diff --git a/CHANGES.rst b/CHANGES.rst index 38372381ac..5fbad54427 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,12 @@ +Changes in synapse v0.28.0 (2018-xx-xx) +======================================= + +As previously advised, this release removes a number of redundant Prometheus +metrics. Administrators may need to update their dashboards and alerting rules +to use the updated metric names, if they have not already done so. See +`docs/metrics-howto.rst `_ +for more details. + Changes in synapse v0.27.2 (2018-03-26) ======================================= diff --git a/UPGRADE.rst b/UPGRADE.rst index f6bb1070b1..39a16b1c0c 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -52,7 +52,7 @@ Upgrading to $NEXT_VERSION ==================== This release expands the anonymous usage stats sent if the opt-in -``report_stats`` configuration is set to ``true``. We now capture RSS memory +``report_stats`` configuration is set to ``true``. We now capture RSS memory and cpu use at a very coarse level. This requires administrators to install the optional ``psutil`` python module. @@ -60,6 +60,13 @@ We would appreciate it if you could assist by ensuring this module is available and ``report_stats`` is enabled. This will let us see if performance changes to synapse are having an impact to the general community. +This release also removes a number of redundant Prometheus metrics. +Administrators may need to update their dashboards and alerting rules to use +the updated metric names, if they have not already done so. See +`docs/metrics-howto.rst `_ +for more details. + + Upgrading to v0.15.0 ==================== diff --git a/docs/metrics-howto.rst b/docs/metrics-howto.rst index 8acc479bc3..5e2d7c52ec 100644 --- a/docs/metrics-howto.rst +++ b/docs/metrics-howto.rst @@ -34,6 +34,17 @@ How to monitor Synapse metrics using Prometheus Restart prometheus. 
+Deprecated metrics removed in 0.28.0 +------------------------------------ + +Synapse 0.28.0 removes all of the metrics deprecated by 0.27.0, which are those +listed under "Old name" below. This has been done to reduce the bandwidth used +by gathering metrics and the storage requirements for the Prometheus server, as +well as reducing CPU overhead for both Synapse and Prometheus. + +Administrators should update any alerts or monitoring dashboards to use the +"New name" listed below. + Block and response metrics renamed for 0.27.0 --------------------------------------------- diff --git a/synapse/http/server.py b/synapse/http/server.py index f19c068ef6..02c7e46f08 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -47,17 +47,6 @@ metrics = synapse.metrics.get_metrics_for(__name__) response_count = metrics.register_counter( "response_count", labels=["method", "servlet", "tag"], - alternative_names=( - # the following are all deprecated aliases for the same metric - metrics.name_prefix + x for x in ( - "_requests", - "_response_time:count", - "_response_ru_utime:count", - "_response_ru_stime:count", - "_response_db_txn_count:count", - "_response_db_txn_duration:count", - ) - ) ) requests_counter = metrics.register_counter( @@ -73,39 +62,24 @@ outgoing_responses_counter = metrics.register_counter( response_timer = metrics.register_counter( "response_time_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_time:total", - ), ) response_ru_utime = metrics.register_counter( "response_ru_utime_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_ru_utime:total", - ), ) response_ru_stime = metrics.register_counter( "response_ru_stime_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_ru_stime:total", - ), ) response_db_txn_count = metrics.register_counter( "response_db_txn_count", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_db_txn_count:total", - ), ) # seconds spent waiting for db txns, excluding scheduling time, when processing # this request response_db_txn_duration = metrics.register_counter( "response_db_txn_duration_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_db_txn_duration:total", - ), ) # seconds spent waiting for a db connection, when processing this request diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index e4b5687a4b..c3d8237e8f 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -31,53 +31,28 @@ metrics = synapse.metrics.get_metrics_for(__name__) block_counter = metrics.register_counter( "block_count", labels=["block_name"], - alternative_names=( - # the following are all deprecated aliases for the same metric - metrics.name_prefix + x for x in ( - "_block_timer:count", - "_block_ru_utime:count", - "_block_ru_stime:count", - "_block_db_txn_count:count", - "_block_db_txn_duration:count", - ) - ) ) block_timer = metrics.register_counter( "block_time_seconds", labels=["block_name"], - alternative_names=( - metrics.name_prefix + "_block_timer:total", - ), ) block_ru_utime = metrics.register_counter( "block_ru_utime_seconds", labels=["block_name"], - alternative_names=( - metrics.name_prefix + "_block_ru_utime:total", - ), ) block_ru_stime = metrics.register_counter( "block_ru_stime_seconds", labels=["block_name"], - alternative_names=( - metrics.name_prefix + 
"_block_ru_stime:total", - ), ) block_db_txn_count = metrics.register_counter( "block_db_txn_count", labels=["block_name"], - alternative_names=( - metrics.name_prefix + "_block_db_txn_count:total", - ), ) # seconds spent waiting for db txns, excluding scheduling time, in this block block_db_txn_duration = metrics.register_counter( "block_db_txn_duration_seconds", labels=["block_name"], - alternative_names=( - metrics.name_prefix + "_block_db_txn_duration:total", - ), ) # seconds spent waiting for a db connection, in this block -- cgit 1.5.1 From 01afc563c39006c21bb7752831cd62c146edc135 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 5 Apr 2018 16:24:04 +0100 Subject: Fix overzealous cache invalidation Fixes an issue where a cache invalidation would invalidate *all* pending entries, rather than just the entry that we intended to invalidate. --- synapse/util/caches/descriptors.py | 64 +++++++++++++++++++++-------------- tests/util/caches/test_descriptors.py | 46 +++++++++++++++++++++++++ 2 files changed, 84 insertions(+), 26 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index bf3a66eae4..68285a7594 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,12 +40,11 @@ _CacheSentinel = object() class CacheEntry(object): __slots__ = [ - "deferred", "sequence", "callbacks", "invalidated" + "deferred", "callbacks", "invalidated" ] - def __init__(self, deferred, sequence, callbacks): + def __init__(self, deferred, callbacks): self.deferred = deferred - self.sequence = sequence self.callbacks = set(callbacks) self.invalidated = False @@ -62,7 +62,6 @@ class Cache(object): "max_entries", "name", "keylen", - "sequence", "thread", "metrics", "_pending_deferred_cache", @@ -80,7 +79,6 @@ class Cache(object): self.name = name self.keylen = keylen - self.sequence = 0 self.thread = None self.metrics = register_cache(name, self.cache) @@ -113,11 +111,10 @@ class Cache(object): callbacks = [callback] if callback else [] val = self._pending_deferred_cache.get(key, _CacheSentinel) if val is not _CacheSentinel: - if val.sequence == self.sequence: - val.callbacks.update(callbacks) - if update_metrics: - self.metrics.inc_hits() - return val.deferred + val.callbacks.update(callbacks) + if update_metrics: + self.metrics.inc_hits() + return val.deferred val = self.cache.get(key, _CacheSentinel, callbacks=callbacks) if val is not _CacheSentinel: @@ -137,12 +134,9 @@ class Cache(object): self.check_thread() entry = CacheEntry( deferred=value, - sequence=self.sequence, callbacks=callbacks, ) - entry.callbacks.update(callbacks) - existing_entry = self._pending_deferred_cache.pop(key, None) if existing_entry: existing_entry.invalidate() @@ -150,13 +144,25 @@ class Cache(object): self._pending_deferred_cache[key] = entry def shuffle(result): - if self.sequence == entry.sequence: - existing_entry = self._pending_deferred_cache.pop(key, None) - if existing_entry is entry: - self.cache.set(key, result, entry.callbacks) - else: - entry.invalidate() + existing_entry = self._pending_deferred_cache.pop(key, None) + if existing_entry is entry: + self.cache.set(key, result, entry.callbacks) else: + # oops, the _pending_deferred_cache has been updated since 
+ # we started our query, so we are out of date. + # + # Better put back whatever we took out. (We do it this way + # round, rather than peeking into the _pending_deferred_cache + # and then removing on a match, to make the common case faster) + if existing_entry is not None: + self._pending_deferred_cache[key] = existing_entry + + # we're not going to put this entry into the cache, so need + # to make sure that the invalidation callbacks are called. + # That was probably done when _pending_deferred_cache was + # updated, but it's possible that `set` was called without + # `invalidate` being previously called, in which case it may + # not have been. Either way, let's double-check now. entry.invalidate() return result @@ -168,25 +174,29 @@ class Cache(object): def invalidate(self, key): self.check_thread() + self.cache.pop(key, None) - # Increment the sequence number so that any SELECT statements that - # raced with the INSERT don't update the cache (SYN-369) - self.sequence += 1 + # if we have a pending lookup for this key, remove it from the + # _pending_deferred_cache, which will (a) stop it being returned + # for future queries and (b) stop it being persisted as a proper entry + # in self.cache. entry = self._pending_deferred_cache.pop(key, None) + + # run the invalidation callbacks now, rather than waiting for the + # deferred to resolve. if entry: entry.invalidate() - self.cache.pop(key, None) - def invalidate_many(self, key): self.check_thread() if not isinstance(key, tuple): raise TypeError( "The cache key must be a tuple not %r" % (type(key),) ) - self.sequence += 1 self.cache.del_multi(key) + # if we have a pending lookup for this key, remove it from the + # _pending_deferred_cache, as above entry_dict = self._pending_deferred_cache.pop(key, None) if entry_dict is not None: for entry in iterate_tree_cache_entry(entry_dict): @@ -194,8 +204,10 @@ class Cache(object): def invalidate_all(self): self.check_thread() - self.sequence += 1 self.cache.clear() + for entry in self._pending_deferred_cache.itervalues(): + entry.invalidate() + self._pending_deferred_cache.clear() class _CacheDescriptorBase(object): diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 3f14ab503f..2516fe40f4 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
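The property the fix above establishes can be stated as runnable code. A hedged companion to the invalidate_all test added below, using the same Cache API the test exercises; not part of the patch:

    from twisted.internet import defer

    from synapse.util.caches import descriptors

    cache = descriptors.Cache("testcache")
    d1 = defer.Deferred()
    d2 = defer.Deferred()
    cache.set("key1", d1)
    cache.set("key2", d2)

    cache.invalidate("key1")

    # Only key1's pending entry is dropped and its callbacks fired;
    # key2's in-flight lookup survives, where the old sequence-number
    # scheme would implicitly have discarded both.
    assert cache.get("key1", None) is None
    assert cache.get("key2") is d2
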
+from functools import partial import logging import mock @@ -25,6 +27,50 @@ from tests import unittest logger = logging.getLogger(__name__) +class CacheTestCase(unittest.TestCase): + def test_invalidate_all(self): + cache = descriptors.Cache("testcache") + + callback_record = [False, False] + + def record_callback(idx): + callback_record[idx] = True + + # add a couple of pending entries + d1 = defer.Deferred() + cache.set("key1", d1, partial(record_callback, 0)) + + d2 = defer.Deferred() + cache.set("key2", d2, partial(record_callback, 1)) + + # lookup should return the deferreds + self.assertIs(cache.get("key1"), d1) + self.assertIs(cache.get("key2"), d2) + + # let one of the lookups complete + d2.callback("result2") + self.assertEqual(cache.get("key2"), "result2") + + # now do the invalidation + cache.invalidate_all() + + # lookup should return none + self.assertIsNone(cache.get("key1", None)) + self.assertIsNone(cache.get("key2", None)) + + # both callbacks should have been callbacked + self.assertTrue( + callback_record[0], "Invalidation callback for key1 not called", + ) + self.assertTrue( + callback_record[1], "Invalidation callback for key2 not called", + ) + + # letting the other lookup complete should do nothing + d1.callback("result1") + self.assertIsNone(cache.get("key1", None)) + + class DescriptorTestCase(unittest.TestCase): @defer.inlineCallbacks def test_cache(self): -- cgit 1.5.1 From 13decdbf96981782616a3ee1826fce1213a1bc89 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 9 Apr 2018 12:58:37 +0100 Subject: Revert "Merge pull request #3066 from matrix-org/rav/remove_redundant_metrics" We aren't ready to release this yet, so I'm reverting it for now. This reverts commit d1679a4ed7947b0814e0f2af9b888a16c588f1a1, reversing changes made to e089100c6231541c446e37e157dec8feed02d283. --- CHANGES.rst | 9 --------- UPGRADE.rst | 9 +-------- docs/metrics-howto.rst | 11 ----------- synapse/http/server.py | 26 ++++++++++++++++++++++++++ synapse/util/metrics.py | 25 +++++++++++++++++++++++++ 5 files changed, 52 insertions(+), 28 deletions(-) (limited to 'synapse/util') diff --git a/CHANGES.rst b/CHANGES.rst index 5fbad54427..38372381ac 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,12 +1,3 @@ -Changes in synapse v0.28.0 (2018-xx-xx) -======================================= - -As previously advised, this release removes a number of redundant Prometheus -metrics. Administrators may need to update their dashboards and alerting rules -to use the updated metric names, if they have not already done so. See -`docs/metrics-howto.rst `_ -for more details. - Changes in synapse v0.27.2 (2018-03-26) ======================================= diff --git a/UPGRADE.rst b/UPGRADE.rst index 39a16b1c0c..f6bb1070b1 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -52,7 +52,7 @@ Upgrading to $NEXT_VERSION ==================== This release expands the anonymous usage stats sent if the opt-in -``report_stats`` configuration is set to ``true``. We now capture RSS memory +``report_stats`` configuration is set to ``true``. We now capture RSS memory and cpu use at a very coarse level. This requires administrators to install the optional ``psutil`` python module. @@ -60,13 +60,6 @@ We would appreciate it if you could assist by ensuring this module is available and ``report_stats`` is enabled. This will let us see if performance changes to synapse are having an impact to the general community. -This release also removes a number of redundant Prometheus metrics. 
-Administrators may need to update their dashboards and alerting rules to use -the updated metric names, if they have not already done so. See -`docs/metrics-howto.rst `_ -for more details. - - Upgrading to v0.15.0 ==================== diff --git a/docs/metrics-howto.rst b/docs/metrics-howto.rst index 5e2d7c52ec..8acc479bc3 100644 --- a/docs/metrics-howto.rst +++ b/docs/metrics-howto.rst @@ -34,17 +34,6 @@ How to monitor Synapse metrics using Prometheus Restart prometheus. -Deprecated metrics removed in 0.28.0 ------------------------------------- - -Synapse 0.28.0 removes all of the metrics deprecated by 0.27.0, which are those -listed under "Old name" below. This has been done to reduce the bandwidth used -by gathering metrics and the storage requirements for the Prometheus server, as -well as reducing CPU overhead for both Synapse and Prometheus. - -Administrators should update any alerts or monitoring dashboards to use the -"New name" listed below. - Block and response metrics renamed for 0.27.0 --------------------------------------------- diff --git a/synapse/http/server.py b/synapse/http/server.py index ac75206ef5..64e083ebfc 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -47,6 +47,17 @@ metrics = synapse.metrics.get_metrics_for(__name__) response_count = metrics.register_counter( "response_count", labels=["method", "servlet", "tag"], + alternative_names=( + # the following are all deprecated aliases for the same metric + metrics.name_prefix + x for x in ( + "_requests", + "_response_time:count", + "_response_ru_utime:count", + "_response_ru_stime:count", + "_response_db_txn_count:count", + "_response_db_txn_duration:count", + ) + ) ) requests_counter = metrics.register_counter( @@ -62,24 +73,39 @@ outgoing_responses_counter = metrics.register_counter( response_timer = metrics.register_counter( "response_time_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_time:total", + ), ) response_ru_utime = metrics.register_counter( "response_ru_utime_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_ru_utime:total", + ), ) response_ru_stime = metrics.register_counter( "response_ru_stime_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_ru_stime:total", + ), ) response_db_txn_count = metrics.register_counter( "response_db_txn_count", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_db_txn_count:total", + ), ) # seconds spent waiting for db txns, excluding scheduling time, when processing # this request response_db_txn_duration = metrics.register_counter( "response_db_txn_duration_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_db_txn_duration:total", + ), ) # seconds spent waiting for a db connection, when processing this request diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index c3d8237e8f..e4b5687a4b 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -31,28 +31,53 @@ metrics = synapse.metrics.get_metrics_for(__name__) block_counter = metrics.register_counter( "block_count", labels=["block_name"], + alternative_names=( + # the following are all deprecated aliases for the same metric + metrics.name_prefix + x for x in ( + "_block_timer:count", + "_block_ru_utime:count", + "_block_ru_stime:count", + "_block_db_txn_count:count", + "_block_db_txn_duration:count", + ) + ) ) 
block_timer = metrics.register_counter( "block_time_seconds", labels=["block_name"], + alternative_names=( + metrics.name_prefix + "_block_timer:total", + ), ) block_ru_utime = metrics.register_counter( "block_ru_utime_seconds", labels=["block_name"], + alternative_names=( + metrics.name_prefix + "_block_ru_utime:total", + ), ) block_ru_stime = metrics.register_counter( "block_ru_stime_seconds", labels=["block_name"], + alternative_names=( + metrics.name_prefix + "_block_ru_stime:total", + ), ) block_db_txn_count = metrics.register_counter( "block_db_txn_count", labels=["block_name"], + alternative_names=( + metrics.name_prefix + "_block_db_txn_count:total", + ), ) # seconds spent waiting for db txns, excluding scheduling time, in this block block_db_txn_duration = metrics.register_counter( "block_db_txn_duration_seconds", labels=["block_name"], + alternative_names=( + metrics.name_prefix + "_block_db_txn_duration:total", + ), ) # seconds spent waiting for a db connection, in this block -- cgit 1.5.1 From 9fbe70a7dc3afabfdac176ba1f4be32dd44602aa Mon Sep 17 00:00:00 2001 From: Vincent Breitmoser Date: Sat, 6 Jan 2018 18:11:02 +0100 Subject: Use sortedcontainers instead of blist This commit drop-in replaces blist with SortedContainers. They are written in pure python so work with pypy, but perform as good as native implementations, at least in a couple benchmarks: http://www.grantjenks.com/docs/sortedcontainers/performance.html --- synapse/federation/send_queue.py | 14 +++++++------- synapse/python_dependencies.py | 2 +- synapse/util/caches/stream_change_cache.py | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) (limited to 'synapse/util') diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 93e5acebc1..945832283f 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -35,7 +35,7 @@ from synapse.storage.presence import UserPresenceState from synapse.util.metrics import Measure import synapse.metrics -from blist import sorteddict +from sortedcontainers import SortedDict from collections import namedtuple import logging @@ -56,19 +56,19 @@ class FederationRemoteSendQueue(object): self.is_mine_id = hs.is_mine_id self.presence_map = {} # Pending presence map user_id -> UserPresenceState - self.presence_changed = sorteddict() # Stream position -> user_id + self.presence_changed = SortedDict() # Stream position -> user_id self.keyed_edu = {} # (destination, key) -> EDU - self.keyed_edu_changed = sorteddict() # stream position -> (destination, key) + self.keyed_edu_changed = SortedDict() # stream position -> (destination, key) - self.edus = sorteddict() # stream position -> Edu + self.edus = SortedDict() # stream position -> Edu - self.failures = sorteddict() # stream position -> (destination, Failure) + self.failures = SortedDict() # stream position -> (destination, Failure) - self.device_messages = sorteddict() # stream position -> destination + self.device_messages = SortedDict() # stream position -> destination self.pos = 1 - self.pos_time = sorteddict() + self.pos_time = SortedDict() # EVERYTHING IS SAD. 
In particular, python only makes new scopes when # we make a new function, so we need to make a new function so the inner diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 40eedb63cb..f9596bddaf 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -34,8 +34,8 @@ REQUIREMENTS = { "bcrypt": ["bcrypt>=3.1.0"], "pillow": ["PIL"], "pydenticon": ["pydenticon"], - "blist": ["blist"], "pysaml2>=3.0.0": ["saml2>=3.0.0"], + "sortedcontainers": ["sortedcontainers"], "pymacaroons-pynacl": ["pymacaroons"], "msgpack-python>=0.3.0": ["msgpack"], "phonenumbers>=8.2.0": ["phonenumbers"], diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 941d873ab8..2ff46090a6 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -16,7 +16,7 @@ from synapse.util.caches import register_cache, CACHE_SIZE_FACTOR -from blist import sorteddict +from sortedcontainers import SortedDict import logging @@ -35,7 +35,7 @@ class StreamChangeCache(object): def __init__(self, name, current_stream_pos, max_size=10000, prefilled_cache={}): self._max_size = int(max_size * CACHE_SIZE_FACTOR) self._entity_to_key = {} - self._cache = sorteddict() + self._cache = SortedDict() self._earliest_known_stream_pos = current_stream_pos self.name = name self.metrics = register_cache(self.name, self._cache) -- cgit 1.5.1 From b3384232a031cc209fb5f0e085bc073a220448be Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 10 Apr 2018 23:14:47 +0100 Subject: Add metrics for ResponseCache --- synapse/appservice/api.py | 3 ++- synapse/federation/federation_server.py | 2 +- synapse/handlers/room_list.py | 5 +++-- synapse/handlers/sync.py | 2 +- synapse/replication/http/send_event.py | 2 +- synapse/util/caches/response_cache.py | 14 +++++++++++++- 6 files changed, 21 insertions(+), 7 deletions(-) (limited to 'synapse/util') diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 40c433d7ae..11e9c37c63 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -73,7 +73,8 @@ class ApplicationServiceApi(SimpleHttpClient): super(ApplicationServiceApi, self).__init__(hs) self.clock = hs.get_clock() - self.protocol_meta_cache = ResponseCache(hs, timeout_ms=HOUR_IN_MS) + self.protocol_meta_cache = ResponseCache(hs, "as_protocol_meta", + timeout_ms=HOUR_IN_MS) @defer.inlineCallbacks def query_user(self, service, user_id): diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index bea7fd0b71..e4ce037acf 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -65,7 +65,7 @@ class FederationServer(FederationBase): # We cache responses to state queries, as they take a while and often # come in waves. 
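Each ResponseCache now carries a human-readable name, which becomes the cache_name label on its registered metrics, as in the constructor change just below. A hedged usage sketch (`hs`, `key` and `compute` are illustrative):

    cache = ResponseCache(hs, "state_resp", timeout_ms=30 * 1000)

    # get() now counts a hit or a miss against the "state_resp" name,
    # and a size callback reports the number of pending entries for
    # each instance.
    result = cache.get(key)
    if result is None:
        result = cache.set(key, compute())
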
- self._state_resp_cache = ResponseCache(hs, timeout_ms=30000) + self._state_resp_cache = ResponseCache(hs, "state_resp", timeout_ms=30000) @defer.inlineCallbacks @log_function diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 5d81f59b44..8028d793c2 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -44,8 +44,9 @@ EMTPY_THIRD_PARTY_ID = ThirdPartyInstanceID(None, None) class RoomListHandler(BaseHandler): def __init__(self, hs): super(RoomListHandler, self).__init__(hs) - self.response_cache = ResponseCache(hs) - self.remote_response_cache = ResponseCache(hs, timeout_ms=30 * 1000) + self.response_cache = ResponseCache(hs, "room_list") + self.remote_response_cache = ResponseCache(hs, "remote_room_list", + timeout_ms=30 * 1000) def get_local_public_room_list(self, limit=None, since_token=None, search_filter=None, diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 0f713ce038..06d17ab20c 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -169,7 +169,7 @@ class SyncHandler(object): self.presence_handler = hs.get_presence_handler() self.event_sources = hs.get_event_sources() self.clock = hs.get_clock() - self.response_cache = ResponseCache(hs) + self.response_cache = ResponseCache(hs, "sync") self.state = hs.get_state_handler() def wait_for_sync_for_user(self, sync_config, since_token=None, timeout=0, diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py index bbe2f967b7..c6a6551d24 100644 --- a/synapse/replication/http/send_event.py +++ b/synapse/replication/http/send_event.py @@ -115,7 +115,7 @@ class ReplicationSendEventRestServlet(RestServlet): self.clock = hs.get_clock() # The responses are tiny, so we may as well cache them for a while - self.response_cache = ResponseCache(hs, timeout_ms=30 * 60 * 1000) + self.response_cache = ResponseCache(hs, "send_event", timeout_ms=30 * 60 * 1000) def on_PUT(self, request, event_id): result = self.response_cache.get(event_id) diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index 00af539880..7f68289723 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -14,6 +14,7 @@ # limitations under the License. from synapse.util.async import ObservableDeferred +from synapse.util.caches import metrics as cache_metrics class ResponseCache(object): @@ -24,17 +25,28 @@ class ResponseCache(object): used rather than trying to compute a new response. """ - def __init__(self, hs, timeout_ms=0): + def __init__(self, hs, name, timeout_ms=0): self.pending_result_cache = {} # Requests that haven't finished yet. self.clock = hs.get_clock() self.timeout_sec = timeout_ms / 1000. + self._metrics = cache_metrics.register_cache( + "response_cache", + size_callback=lambda: self.size(), + cache_name=name, + ) + + def size(self): + return len(self.pending_result_cache) + def get(self, key): result = self.pending_result_cache.get(key) if result is not None: + self._metrics.inc_hits() return result.observe() else: + self._metrics.inc_misses() return None def set(self, key, deferred): -- cgit 1.5.1 From b78395b7fe449d59a5c46c81a869f9f191cd934f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 12 Apr 2018 12:08:59 +0100 Subject: Refactor ResponseCache usage Adds a `.wrap` method to ResponseCache which wraps up the boilerplate of a (get, set) pair, and then use it throughout the codebase. 
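At each call site, the boilerplate being factored out has this shape (a sketch of the repeated pattern; `_compute_result` and `arg` are illustrative):

    result = self.response_cache.get(key)
    if not result:
        result = self.response_cache.set(
            key,
            preserve_fn(self._compute_result)(arg),
        )
    return make_deferred_yieldable(result)

which `.wrap` collapses into a single call that also gets the logcontext handling right:

    return self.response_cache.wrap(key, self._compute_result, arg)
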
This will be largely non-functional, but does include the following functional changes: * federation_server.on_context_state_request: drops use of _server_linearizer which looked redundant and could cause incorrect cache misses by yielding between the get and the set. * RoomListHandler.get_remote_public_room_list(): fixes logcontext leaks * the wrap function includes some logging. I'm hoping this won't be too noisy on production. --- synapse/appservice/api.py | 8 +---- synapse/federation/federation_server.py | 16 +++------ synapse/handlers/room_list.py | 38 ++++++++------------- synapse/handlers/sync.py | 16 ++++----- synapse/replication/http/send_event.py | 18 ++++------ synapse/util/caches/response_cache.py | 58 +++++++++++++++++++++++++++++++-- 6 files changed, 87 insertions(+), 67 deletions(-) (limited to 'synapse/util') diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 11e9c37c63..00efff1464 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -18,7 +18,6 @@ from synapse.api.constants import ThirdPartyEntityKind from synapse.api.errors import CodeMessageException from synapse.http.client import SimpleHttpClient from synapse.events.utils import serialize_event -from synapse.util.logcontext import preserve_fn, make_deferred_yieldable from synapse.util.caches.response_cache import ResponseCache from synapse.types import ThirdPartyInstanceID @@ -194,12 +193,7 @@ class ApplicationServiceApi(SimpleHttpClient): defer.returnValue(None) key = (service.id, protocol) - result = self.protocol_meta_cache.get(key) - if not result: - result = self.protocol_meta_cache.set( - key, preserve_fn(_get)() - ) - return make_deferred_yieldable(result) + return self.protocol_meta_cache.wrap(key, _get) @defer.inlineCallbacks def push_bulk(self, service, events, txn_id=None): diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index e4ce037acf..d1611f39a9 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -30,7 +30,6 @@ import synapse.metrics from synapse.types import get_domain_from_id from synapse.util import async from synapse.util.caches.response_cache import ResponseCache -from synapse.util.logcontext import make_deferred_yieldable, preserve_fn from synapse.util.logutils import log_function # when processing incoming transactions, we try to handle multiple rooms in @@ -212,16 +211,11 @@ class FederationServer(FederationBase): if not in_room: raise AuthError(403, "Host not in room.") - result = self._state_resp_cache.get((room_id, event_id)) - if not result: - with (yield self._server_linearizer.queue((origin, room_id))): - d = self._state_resp_cache.set( - (room_id, event_id), - preserve_fn(self._on_context_state_request_compute)(room_id, event_id) - ) - resp = yield make_deferred_yieldable(d) - else: - resp = yield make_deferred_yieldable(result) + resp = yield self._state_resp_cache.wrap( + (room_id, event_id), + self._on_context_state_request_compute, + room_id, event_id, + ) defer.returnValue((200, resp)) diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 8028d793c2..add3f9b009 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -20,7 +20,6 @@ from ._base import BaseHandler from synapse.api.constants import ( EventTypes, JoinRules, ) -from synapse.util.logcontext import make_deferred_yieldable, preserve_fn from synapse.util.async import concurrently_execute from synapse.util.caches.descriptors import 
cachedInlineCallbacks from synapse.util.caches.response_cache import ResponseCache @@ -78,18 +77,11 @@ class RoomListHandler(BaseHandler): ) key = (limit, since_token, network_tuple) - result = self.response_cache.get(key) - if not result: - logger.info("No cached result, calculating one.") - result = self.response_cache.set( - key, - preserve_fn(self._get_public_room_list)( - limit, since_token, network_tuple=network_tuple - ) - ) - else: - logger.info("Using cached deferred result.") - return make_deferred_yieldable(result) + return self.response_cache.wrap( + key, + self._get_public_room_list, + limit, since_token, network_tuple=network_tuple, + ) @defer.inlineCallbacks def _get_public_room_list(self, limit=None, since_token=None, @@ -423,18 +415,14 @@ class RoomListHandler(BaseHandler): server_name, limit, since_token, include_all_networks, third_party_instance_id, ) - result = self.remote_response_cache.get(key) - if not result: - result = self.remote_response_cache.set( - key, - repl_layer.get_public_rooms( - server_name, limit=limit, since_token=since_token, - search_filter=search_filter, - include_all_networks=include_all_networks, - third_party_instance_id=third_party_instance_id, - ) - ) - return result + return self.remote_response_cache.wrap( + key, + repl_layer.get_public_rooms, + server_name, limit=limit, since_token=since_token, + search_filter=search_filter, + include_all_networks=include_all_networks, + third_party_instance_id=third_party_instance_id, + ) class RoomListNextBatch(namedtuple("RoomListNextBatch", ( diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 06d17ab20c..c6946831ab 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -15,7 +15,7 @@ from synapse.api.constants import Membership, EventTypes from synapse.util.async import concurrently_execute -from synapse.util.logcontext import LoggingContext, make_deferred_yieldable, preserve_fn +from synapse.util.logcontext import LoggingContext from synapse.util.metrics import Measure, measure_func from synapse.util.caches.response_cache import ResponseCache from synapse.push.clientformat import format_push_rules_for_user @@ -180,15 +180,11 @@ class SyncHandler(object): Returns: A Deferred SyncResult. 
""" - result = self.response_cache.get(sync_config.request_key) - if not result: - result = self.response_cache.set( - sync_config.request_key, - preserve_fn(self._wait_for_sync_for_user)( - sync_config, since_token, timeout, full_state - ) - ) - return make_deferred_yieldable(result) + return self.response_cache.wrap( + sync_config.request_key, + self._wait_for_sync_for_user, + sync_config, since_token, timeout, full_state, + ) @defer.inlineCallbacks def _wait_for_sync_for_user(self, sync_config, since_token, timeout, diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py index c6a6551d24..a9baa2c1c3 100644 --- a/synapse/replication/http/send_event.py +++ b/synapse/replication/http/send_event.py @@ -23,7 +23,6 @@ from synapse.events.snapshot import EventContext from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.util.async import sleep from synapse.util.caches.response_cache import ResponseCache -from synapse.util.logcontext import make_deferred_yieldable, preserve_fn from synapse.util.metrics import Measure from synapse.types import Requester, UserID @@ -118,17 +117,12 @@ class ReplicationSendEventRestServlet(RestServlet): self.response_cache = ResponseCache(hs, "send_event", timeout_ms=30 * 60 * 1000) def on_PUT(self, request, event_id): - result = self.response_cache.get(event_id) - if not result: - result = self.response_cache.set( - event_id, - self._handle_request(request) - ) - else: - logger.warn("Returning cached response") - return make_deferred_yieldable(result) - - @preserve_fn + return self.response_cache.wrap( + event_id, + self._handle_request, + request + ) + @defer.inlineCallbacks def _handle_request(self, request): with Measure(self.clock, "repl_send_event_parse"): diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index 066fa423fd..0c2c347953 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -12,9 +12,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import logging from synapse.util.async import ObservableDeferred from synapse.util.caches import metrics as cache_metrics +from synapse.util.logcontext import make_deferred_yieldable, run_in_background + +logger = logging.getLogger(__name__) class ResponseCache(object): @@ -31,6 +35,7 @@ class ResponseCache(object): self.clock = hs.get_clock() self.timeout_sec = timeout_ms / 1000. + self._name = name self._metrics = cache_metrics.register_cache( "response_cache", size_callback=lambda: self.size(), @@ -47,7 +52,7 @@ class ResponseCache(object): so you'll probably want to make_deferred_yieldable it. Args: - key (str): + key (hashable): Returns: twisted.internet.defer.Deferred|None: None if there is no entry @@ -76,7 +81,7 @@ class ResponseCache(object): to do it everywhere ResponseCache is used.) Args: - key (str): + key (hashable): deferred (twisted.internet.defer.Deferred): Returns: @@ -97,3 +102,52 @@ class ResponseCache(object): result.addBoth(remove) return result.observe() + + def wrap(self, key, callback, *args, **kwargs): + """Wrap together a *get* and *set* call, taking care of logcontexts + + First looks up the key in the cache, and if it is present makes it + follow the synapse logcontext rules and returns it. 
+ + Otherwise, makes a call to *callback(*args, **kwargs)*, which should + follow the synapse logcontext rules, and adds the result to the cache. + + Example usage: + + @defer.inlineCallbacks + def handle_request(request): + # etc + defer.returnValue(result) + + result = yield response_cache.wrap( + key, + handle_request, + request, + ) + + Args: + key (hashable): key to get/set in the cache + + callback (callable): function to call if the key is not found in + the cache + + *args: positional parameters to pass to the callback, if it is used + + **kwargs: named paramters to pass to the callback, if it is used + + Returns: + twisted.internet.defer.Deferred: yieldable result + """ + result = self.get(key) + if not result: + logger.info("[%s]: no cached result for [%s], calculating new one", + self._name, key) + d = run_in_background(callback, *args, **kwargs) + result = self.set(key, d) + elif result.called: + logger.info("[%s]: using completed cached result for [%s]", + self._name, key) + else: + logger.info("[%s]: using incomplete cached result for [%s]", + self._name, key) + return make_deferred_yieldable(result) -- cgit 1.5.1 From 60f6014bb7912cf5629ae7d4ab2452ed67e5304a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 13 Apr 2018 07:32:29 +0100 Subject: ResponseCache: fix handling of completed results Turns out that ObservableDeferred.observe doesn't return a deferred if the result is already completed. Fix handling and improve documentation. --- synapse/util/caches/response_cache.py | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index 0c2c347953..7f79333e96 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -14,6 +14,8 @@ # limitations under the License. import logging +from twisted.internet import defer + from synapse.util.async import ObservableDeferred from synapse.util.caches import metrics as cache_metrics from synapse.util.logcontext import make_deferred_yieldable, run_in_background @@ -48,15 +50,21 @@ class ResponseCache(object): def get(self, key): """Look up the given key. - Returns a deferred which doesn't follow the synapse logcontext rules, - so you'll probably want to make_deferred_yieldable it. + Can return either a new Deferred (which also doesn't follow the synapse + logcontext rules), or, if the request has completed, the actual + result. You will probably want to make_deferred_yieldable the result. + + If there is no entry for the key, returns None. It is worth noting that + this means there is no way to distinguish a completed result of None + from an absent cache entry. Args: key (hashable): Returns: - twisted.internet.defer.Deferred|None: None if there is no entry - for this key; otherwise a deferred result. + twisted.internet.defer.Deferred|None|E: None if there is no entry + for this key; otherwise either a deferred result or the result + itself. """ result = self.pending_result_cache.get(key) if result is not None: @@ -73,19 +81,17 @@ class ResponseCache(object): you should wrap normal synapse deferreds with logcontext.run_in_background). 
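A compliant caller in miniature (a hedged sketch, inside an @defer.inlineCallbacks method; it is the same pattern wrap() uses internally):

    d = run_in_background(callback, *args, **kwargs)
    result = yield make_deferred_yieldable(cache.set(key, d))
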
- Returns a new Deferred which also doesn't follow the synapse logcontext - rules, so you will want to make_deferred_yieldable it - - (TODO: before using this more widely, it might make sense to refactor - it and get() so that they do the necessary wrapping rather than having - to do it everywhere ResponseCache is used.) + Can return either a new Deferred (which also doesn't follow the synapse + logcontext rules), or, if *deferred* was already complete, the actual + result. You will probably want to make_deferred_yieldable the result. Args: key (hashable): - deferred (twisted.internet.defer.Deferred): + deferred (twisted.internet.defer.Deferred[T): Returns: - twisted.internet.defer.Deferred + twisted.internet.defer.Deferred[T]|T: a new deferred, or the actual + result. """ result = ObservableDeferred(deferred, consumeErrors=True) self.pending_result_cache[key] = result @@ -144,7 +150,7 @@ class ResponseCache(object): self._name, key) d = run_in_background(callback, *args, **kwargs) result = self.set(key, d) - elif result.called: + elif not isinstance(result, defer.Deferred) or result.called: logger.info("[%s]: using completed cached result for [%s]", self._name, key) else: -- cgit 1.5.1 From d3347ad48553bd678fca7e3259d0824225cc6af2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 13 Apr 2018 11:16:43 +0100 Subject: Revert "Use sortedcontainers instead of blist" This reverts commit 9fbe70a7dc3afabfdac176ba1f4be32dd44602aa. It turns out that sortedcontainers.SortedDict is not an exact match for blist.sorteddict; in particular, `popitem()` removes things from the opposite end of the dict. This is trivial to fix, but I want to add some unit tests, and potentially some more thought about it, before we do so. --- synapse/federation/send_queue.py | 14 +++++++------- synapse/python_dependencies.py | 2 +- synapse/util/caches/stream_change_cache.py | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) (limited to 'synapse/util') diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 945832283f..93e5acebc1 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -35,7 +35,7 @@ from synapse.storage.presence import UserPresenceState from synapse.util.metrics import Measure import synapse.metrics -from sortedcontainers import SortedDict +from blist import sorteddict from collections import namedtuple import logging @@ -56,19 +56,19 @@ class FederationRemoteSendQueue(object): self.is_mine_id = hs.is_mine_id self.presence_map = {} # Pending presence map user_id -> UserPresenceState - self.presence_changed = SortedDict() # Stream position -> user_id + self.presence_changed = sorteddict() # Stream position -> user_id self.keyed_edu = {} # (destination, key) -> EDU - self.keyed_edu_changed = SortedDict() # stream position -> (destination, key) + self.keyed_edu_changed = sorteddict() # stream position -> (destination, key) - self.edus = SortedDict() # stream position -> Edu + self.edus = sorteddict() # stream position -> Edu - self.failures = SortedDict() # stream position -> (destination, Failure) + self.failures = sorteddict() # stream position -> (destination, Failure) - self.device_messages = SortedDict() # stream position -> destination + self.device_messages = sorteddict() # stream position -> destination self.pos = 1 - self.pos_time = SortedDict() + self.pos_time = sorteddict() # EVERYTHING IS SAD. 
In particular, python only makes new scopes when # we make a new function, so we need to make a new function so the inner diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index f9596bddaf..40eedb63cb 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -34,8 +34,8 @@ REQUIREMENTS = { "bcrypt": ["bcrypt>=3.1.0"], "pillow": ["PIL"], "pydenticon": ["pydenticon"], + "blist": ["blist"], "pysaml2>=3.0.0": ["saml2>=3.0.0"], - "sortedcontainers": ["sortedcontainers"], "pymacaroons-pynacl": ["pymacaroons"], "msgpack-python>=0.3.0": ["msgpack"], "phonenumbers>=8.2.0": ["phonenumbers"], diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 2ff46090a6..941d873ab8 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -16,7 +16,7 @@ from synapse.util.caches import register_cache, CACHE_SIZE_FACTOR -from sortedcontainers import SortedDict +from blist import sorteddict import logging @@ -35,7 +35,7 @@ class StreamChangeCache(object): def __init__(self, name, current_stream_pos, max_size=10000, prefilled_cache={}): self._max_size = int(max_size * CACHE_SIZE_FACTOR) self._entity_to_key = {} - self._cache = SortedDict() + self._cache = sorteddict() self._earliest_known_stream_pos = current_stream_pos self.name = name self.metrics = register_cache(self.name, self._cache) -- cgit 1.5.1 From f63ff73c7fc9e27fa42ac73bf520796ff37bfcc2 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Sun, 15 Apr 2018 16:39:30 +0200 Subject: add __bool__ alias to __nonzero__ methods Signed-off-by: Adrian Tschira --- synapse/handlers/sync.py | 7 +++++++ synapse/notifier.py | 1 + synapse/util/logcontext.py | 1 + 3 files changed, 9 insertions(+) (limited to 'synapse/util') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 06d17ab20c..fe790b4c06 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -52,6 +52,7 @@ class TimelineBatch(collections.namedtuple("TimelineBatch", [ to tell if room needs to be part of the sync result. """ return bool(self.events) + __bool__ = __nonzero__ # python3 class JoinedSyncResult(collections.namedtuple("JoinedSyncResult", [ @@ -76,6 +77,7 @@ class JoinedSyncResult(collections.namedtuple("JoinedSyncResult", [ # nb the notification count does not, er, count: if there's nothing # else in the result, we don't need to send it. 
) + __bool__ = __nonzero__ # python3 class ArchivedSyncResult(collections.namedtuple("ArchivedSyncResult", [ @@ -95,6 +97,7 @@ class ArchivedSyncResult(collections.namedtuple("ArchivedSyncResult", [ or self.state or self.account_data ) + __bool__ = __nonzero__ # python3 class InvitedSyncResult(collections.namedtuple("InvitedSyncResult", [ @@ -106,6 +109,7 @@ class InvitedSyncResult(collections.namedtuple("InvitedSyncResult", [ def __nonzero__(self): """Invited rooms should always be reported to the client""" return True + __bool__ = __nonzero__ # python3 class GroupsSyncResult(collections.namedtuple("GroupsSyncResult", [ @@ -117,6 +121,7 @@ class GroupsSyncResult(collections.namedtuple("GroupsSyncResult", [ def __nonzero__(self): return bool(self.join or self.invite or self.leave) + __bool__ = __nonzero__ # python3 class DeviceLists(collections.namedtuple("DeviceLists", [ @@ -127,6 +132,7 @@ class DeviceLists(collections.namedtuple("DeviceLists", [ def __nonzero__(self): return bool(self.changed or self.left) + __bool__ = __nonzero__ # python3 class SyncResult(collections.namedtuple("SyncResult", [ @@ -159,6 +165,7 @@ class SyncResult(collections.namedtuple("SyncResult", [ self.device_lists or self.groups ) + __bool__ = __nonzero__ # python3 class SyncHandler(object): diff --git a/synapse/notifier.py b/synapse/notifier.py index ef042681bc..0e40a4aad6 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -144,6 +144,7 @@ class _NotifierUserStream(object): class EventStreamResult(namedtuple("EventStreamResult", ("events", "tokens"))): def __nonzero__(self): return bool(self.events) + __bool__ = __nonzero__ # python3 class Notifier(object): diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index d660ec785b..d59adc236e 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -92,6 +92,7 @@ class LoggingContext(object): def __nonzero__(self): return False + __bool__ = __nonzero__ # python3 sentinel = Sentinel() -- cgit 1.5.1 From 878995e660e46c5dedb56cde8bd7a3d46838cdd7 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Sun, 15 Apr 2018 21:37:58 +0200 Subject: Replace Queue with six.moves.queue and a six.range change which I missed the last time Signed-off-by: Adrian Tschira --- synapse/storage/event_federation.py | 6 ++++-- synapse/util/file_consumer.py | 4 ++-- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'synapse/util') diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index 00ee82d300..e828328243 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -24,7 +24,9 @@ from synapse.util.caches.descriptors import cached from unpaddedbase64 import encode_base64 import logging -from Queue import PriorityQueue, Empty +from six.moves.queue import PriorityQueue, Empty + +from six.moves import range logger = logging.getLogger(__name__) @@ -78,7 +80,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, front_list = list(front) chunks = [ front_list[x:x + 100] - for x in xrange(0, len(front), 100) + for x in range(0, len(front), 100) ] for chunk in chunks: txn.execute( diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py index 90a2608d6f..3c8a165331 100644 --- a/synapse/util/file_consumer.py +++ b/synapse/util/file_consumer.py @@ -17,7 +17,7 @@ from twisted.internet import threads, reactor from synapse.util.logcontext import make_deferred_yieldable, preserve_fn -import Queue +from six.moves import queue class 
BackgroundFileConsumer(object): @@ -49,7 +49,7 @@ class BackgroundFileConsumer(object): # Queue of slices of bytes to be written. When producer calls # unregister a final None is sent. - self._bytes_queue = Queue.Queue() + self._bytes_queue = queue.Queue() # Deferred that is resolved when finished writing self._finished_deferred = None -- cgit 1.5.1 From 1ea904b9f09ea6a9df7792f61029d0250602d46b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 23 Apr 2018 00:53:18 +0100 Subject: Use deferred.addTimeout instead of time_bound_deferred This doesn't feel like a wheel we need to reinvent. --- synapse/http/__init__.py | 22 +++++++++++++ synapse/http/client.py | 20 +++++------- synapse/http/matrixfederationclient.py | 35 ++++++++++----------- synapse/notifier.py | 23 +++++++------- synapse/util/__init__.py | 56 ---------------------------------- tests/util/test_clock.py | 33 -------------------- 6 files changed, 59 insertions(+), 130 deletions(-) delete mode 100644 tests/util/test_clock.py (limited to 'synapse/util') diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py index bfebb0f644..20e568bc43 100644 --- a/synapse/http/__init__.py +++ b/synapse/http/__init__.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,3 +13,24 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from twisted.internet.defer import CancelledError +from twisted.python import failure + +from synapse.api.errors import SynapseError + + +class RequestTimedOutError(SynapseError): + """Exception representing timeout of an outbound request""" + def __init__(self): + super(RequestTimedOutError, self).__init__(504, "Timed out") + + +def cancelled_to_request_timed_out_error(value): + """Turns CancelledErrors into RequestTimedOutErrors. + + For use with deferred.addTimeout() + """ + if isinstance(value, failure.Failure): + value.trap(CancelledError) + raise RequestTimedOutError() + return value diff --git a/synapse/http/client.py b/synapse/http/client.py index f3e4973c2e..35c8d51e71 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -18,9 +19,9 @@ from OpenSSL.SSL import VERIFY_NONE from synapse.api.errors import ( CodeMessageException, MatrixCodeMessageException, SynapseError, Codes, ) +from synapse.http import cancelled_to_request_timed_out_error from synapse.util.caches import CACHE_SIZE_FACTOR from synapse.util.logcontext import make_deferred_yieldable -from synapse.util import logcontext import synapse.metrics from synapse.http.endpoint import SpiderEndpoint @@ -95,21 +96,16 @@ class SimpleHttpClient(object): # counters to it outgoing_requests_counter.inc(method) - def send_request(): + logger.info("Sending request %s %s", method, uri) + + try: request_deferred = self.agent.request( method, uri, *args, **kwargs ) - - return self.clock.time_bound_deferred( - request_deferred, - time_out=60, + request_deferred.addTimeout( + 60, reactor, cancelled_to_request_timed_out_error, ) - - logger.info("Sending request %s %s", method, uri) - - try: - with logcontext.PreserveLoggingContext(): - response = yield send_request() + response = yield make_deferred_yieldable(request_deferred) incoming_responses_counter.inc(method, response.code) logger.info( diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 60a29081e8..fe4b1636a6 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,17 +13,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import synapse.util.retryutils from twisted.internet import defer, reactor, protocol from twisted.internet.error import DNSLookupError from twisted.web.client import readBody, HTTPConnectionPool, Agent from twisted.web.http_headers import Headers from twisted.web._newclient import ResponseDone +from synapse.http import cancelled_to_request_timed_out_error from synapse.http.endpoint import matrix_federation_endpoint +import synapse.metrics from synapse.util.async import sleep from synapse.util import logcontext -import synapse.metrics +from synapse.util.logcontext import make_deferred_yieldable +import synapse.util.retryutils from canonicaljson import encode_canonical_json @@ -184,21 +187,19 @@ class MatrixFederationHttpClient(object): producer = body_callback(method, http_url_bytes, headers_dict) try: - def send_request(): - request_deferred = self.agent.request( - method, - url_bytes, - Headers(headers_dict), - producer - ) - - return self.clock.time_bound_deferred( - request_deferred, - time_out=timeout / 1000. if timeout else 60, - ) - - with logcontext.PreserveLoggingContext(): - response = yield send_request() + request_deferred = self.agent.request( + method, + url_bytes, + Headers(headers_dict), + producer + ) + request_deferred.addTimeout( + timeout / 1000. if timeout else 60, + reactor, cancelled_to_request_timed_out_error, + ) + response = yield make_deferred_yieldable( + request_deferred, + ) log_result = "%d %s" % (response.code, response.phrase,) break diff --git a/synapse/notifier.py b/synapse/notifier.py index 0e40a4aad6..1e4f78b993 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -13,12 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from twisted.internet import defer +from twisted.internet import defer, reactor +from twisted.internet.defer import TimeoutError + from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError from synapse.handlers.presence import format_user_presence_state -from synapse.util import DeferredTimedOutError from synapse.util.logutils import log_function from synapse.util.async import ObservableDeferred from synapse.util.logcontext import PreserveLoggingContext, preserve_fn @@ -331,11 +332,11 @@ class Notifier(object): # Now we wait for the _NotifierUserStream to be told there # is a new token. listener = user_stream.new_listener(prev_token) + listener.deferred.addTimeout( + (end_time - now) / 1000., reactor, + ) with PreserveLoggingContext(): - yield self.clock.time_bound_deferred( - listener.deferred, - time_out=(end_time - now) / 1000. - ) + yield listener.deferred current_token = user_stream.current_token @@ -346,7 +347,7 @@ class Notifier(object): # Update the prev_token to the current_token since nothing # has happened between the old prev_token and the current_token prev_token = current_token - except DeferredTimedOutError: + except TimeoutError: break except defer.CancelledError: break @@ -551,13 +552,11 @@ class Notifier(object): if end_time <= now: break + listener.deferred.addTimeout((end_time - now) / 1000., reactor) try: with PreserveLoggingContext(): - yield self.clock.time_bound_deferred( - listener.deferred, - time_out=(end_time - now) / 1000. - ) - except DeferredTimedOutError: + yield listener.deferred + except TimeoutError: break except defer.CancelledError: break diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 756d8ffa32..814a7bf71b 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.api.errors import SynapseError from synapse.util.logcontext import PreserveLoggingContext from twisted.internet import defer, reactor, task @@ -24,11 +23,6 @@ import logging logger = logging.getLogger(__name__) -class DeferredTimedOutError(SynapseError): - def __init__(self): - super(DeferredTimedOutError, self).__init__(504, "Timed out") - - def unwrapFirstError(failure): # defer.gatherResults and DeferredLists wrap failures. 
    failure.trap(defer.FirstError)
@@ -85,53 +79,3 @@ class Clock(object):
         except Exception:
             if not ignore_errs:
                 raise
-
-    def time_bound_deferred(self, given_deferred, time_out):
-        if given_deferred.called:
-            return given_deferred
-
-        ret_deferred = defer.Deferred()
-
-        def timed_out_fn():
-            e = DeferredTimedOutError()
-
-            try:
-                ret_deferred.errback(e)
-            except Exception:
-                pass
-
-            try:
-                given_deferred.cancel()
-            except Exception:
-                pass
-
-        timer = None
-
-        def cancel(res):
-            try:
-                self.cancel_call_later(timer)
-            except Exception:
-                pass
-            return res
-
-        ret_deferred.addBoth(cancel)
-
-        def success(res):
-            try:
-                ret_deferred.callback(res)
-            except Exception:
-                pass
-
-            return res
-
-        def err(res):
-            try:
-                ret_deferred.errback(res)
-            except Exception:
-                pass
-
-        given_deferred.addCallbacks(callback=success, errback=err)
-
-        timer = self.call_later(time_out, timed_out_fn)
-
-        return ret_deferred
diff --git a/tests/util/test_clock.py b/tests/util/test_clock.py
deleted file mode 100644
index 9672603579..0000000000
--- a/tests/util/test_clock.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2017 Vector Creations Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from synapse import util
-from twisted.internet import defer
-from tests import unittest
-
-
-class ClockTestCase(unittest.TestCase):
-    @defer.inlineCallbacks
-    def test_time_bound_deferred(self):
-        # just a deferred which never resolves
-        slow_deferred = defer.Deferred()
-
-        clock = util.Clock()
-        time_bound = clock.time_bound_deferred(slow_deferred, 0.001)
-
-        try:
-            yield time_bound
-            self.fail("Expected timedout error, but got nothing")
-        except util.DeferredTimedOutError:
-            pass
-- cgit 1.5.1


From 9255a6cb17716c022ebae1dbe9c142b78ca86ea7 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Fri, 27 Apr 2018 11:07:40 +0100
Subject: Improve exception handling for background processes

There were a bunch of places where we fire off a process to happen in the
background, but don't have any exception handling on it - instead relying on
the unhandled error being logged when the relevant deferred gets
garbage-collected.

This is unsatisfactory for a number of reasons:
 - logging on garbage collection is best-effort and may happen some time
   after the error, if at all
 - it can be hard to figure out where the error actually happened.
 - it is logged as a scary CRITICAL error which (a) I always forget to grep
   for and (b) it's not really CRITICAL if a background process we don't
   care about fails.

So this is an attempt to add exception handling to everything we fire off
into the background.
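Each call site converted in the patch below follows the same idiom: the
background work moves into a small inlineCallbacks wrapper which catches and
logs its own exceptions, and it is that wrapper which gets fired off via
run_in_background. A minimal sketch of the idiom (the ExampleHandler,
on_notification and do_work names are illustrative, not taken from the
patch):

    import logging

    from twisted.internet import defer

    from synapse.util.logcontext import run_in_background

    logger = logging.getLogger(__name__)


    class ExampleHandler(object):
        def on_notification(self, stream_id):
            # Fire and forget: failures are caught and logged inside the
            # wrapper, rather than surfacing as a CRITICAL unhandled-error
            # log when the deferred is eventually garbage-collected.
            run_in_background(self._handle_notification, stream_id)

        @defer.inlineCallbacks
        def _handle_notification(self, stream_id):
            try:
                yield self.do_work(stream_id)  # illustrative async work
            except Exception:
                logger.exception("Error handling notification")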
--- synapse/app/appservice.py | 15 +++-- synapse/app/federation_sender.py | 27 +++++---- synapse/app/pusher.py | 31 +++++----- synapse/app/synchrotron.py | 95 ++++++++++++++++--------------- synapse/app/user_dir.py | 13 ++++- synapse/appservice/scheduler.py | 25 ++++---- synapse/crypto/keyring.py | 93 +++++++++++++++--------------- synapse/federation/transaction_queue.py | 2 + synapse/federation/transport/server.py | 13 ++++- synapse/groups/attestations.py | 44 +++++++------- synapse/handlers/message.py | 22 +++++-- synapse/handlers/presence.py | 19 +++++-- synapse/handlers/receipts.py | 61 ++++++++++---------- synapse/handlers/typing.py | 43 +++++++------- synapse/notifier.py | 13 +++-- synapse/push/emailpusher.py | 11 ++-- synapse/push/httppusher.py | 5 +- synapse/rest/media/v1/storage_provider.py | 9 ++- synapse/storage/event_push_actions.py | 24 +++++--- synapse/util/logcontext.py | 7 ++- 20 files changed, 335 insertions(+), 237 deletions(-) (limited to 'synapse/util') diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index f2540023a7..58f2c9d68c 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -32,10 +32,10 @@ from synapse.replication.tcp.client import ReplicationClientHandler from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, preserve_fn +from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string -from twisted.internet import reactor +from twisted.internet import reactor, defer from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.appservice") @@ -112,9 +112,14 @@ class ASReplicationHandler(ReplicationClientHandler): if stream_name == "events": max_stream_id = self.store.get_room_max_stream_ordering() - preserve_fn( - self.appservice_handler.notify_interested_services - )(max_stream_id) + run_in_background(self._notify_app_services, max_stream_id) + + @defer.inlineCallbacks + def _notify_app_services(self, room_stream_id): + try: + yield self.appservice_handler.notify_interested_services(room_stream_id) + except Exception: + logger.exception("Error notifying application services of event") def start(config_options): diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index 0cc3331519..4f2a9ca21a 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -237,19 +237,22 @@ class FederationSenderHandler(object): @defer.inlineCallbacks def update_token(self, token): - self.federation_position = token - - # We linearize here to ensure we don't have races updating the token - with (yield self._fed_position_linearizer.queue(None)): - if self._last_ack < self.federation_position: - yield self.store.update_federation_out_pos( - "federation", self.federation_position - ) + try: + self.federation_position = token + + # We linearize here to ensure we don't have races updating the token + with (yield self._fed_position_linearizer.queue(None)): + if self._last_ack < self.federation_position: + yield self.store.update_federation_out_pos( + "federation", self.federation_position + ) - # We ACK this token over replication so that the master can drop - # its in memory queues - self.replication_client.send_federation_ack(self.federation_position) - self._last_ack = self.federation_position + # We ACK this token over 
replication so that the master can drop + # its in memory queues + self.replication_client.send_federation_ack(self.federation_position) + self._last_ack = self.federation_position + except Exception: + logger.exception("Error updating federation stream position") if __name__ == '__main__': diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index d5c3a85195..739d113ad5 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -144,20 +144,23 @@ class PusherReplicationHandler(ReplicationClientHandler): @defer.inlineCallbacks def poke_pushers(self, stream_name, token, rows): - if stream_name == "pushers": - for row in rows: - if row.deleted: - yield self.stop_pusher(row.user_id, row.app_id, row.pushkey) - else: - yield self.start_pusher(row.user_id, row.app_id, row.pushkey) - elif stream_name == "events": - yield self.pusher_pool.on_new_notifications( - token, token, - ) - elif stream_name == "receipts": - yield self.pusher_pool.on_new_receipts( - token, token, set(row.room_id for row in rows) - ) + try: + if stream_name == "pushers": + for row in rows: + if row.deleted: + yield self.stop_pusher(row.user_id, row.app_id, row.pushkey) + else: + yield self.start_pusher(row.user_id, row.app_id, row.pushkey) + elif stream_name == "events": + yield self.pusher_pool.on_new_notifications( + token, token, + ) + elif stream_name == "receipts": + yield self.pusher_pool.on_new_receipts( + token, token, set(row.room_id for row in rows) + ) + except Exception: + logger.exception("Error poking pushers") def stop_pusher(self, user_id, app_id, pushkey): key = "%s:%s" % (app_id, pushkey) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 2fddcd935a..777da564d7 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -340,55 +340,58 @@ class SyncReplicationHandler(ReplicationClientHandler): @defer.inlineCallbacks def process_and_notify(self, stream_name, token, rows): - if stream_name == "events": - # We shouldn't get multiple rows per token for events stream, so - # we don't need to optimise this for multiple rows. - for row in rows: - event = yield self.store.get_event(row.event_id) - extra_users = () - if event.type == EventTypes.Member: - extra_users = (event.state_key,) - max_token = self.store.get_room_max_stream_ordering() - self.notifier.on_new_room_event( - event, token, max_token, extra_users + try: + if stream_name == "events": + # We shouldn't get multiple rows per token for events stream, so + # we don't need to optimise this for multiple rows. 
+ for row in rows: + event = yield self.store.get_event(row.event_id) + extra_users = () + if event.type == EventTypes.Member: + extra_users = (event.state_key,) + max_token = self.store.get_room_max_stream_ordering() + self.notifier.on_new_room_event( + event, token, max_token, extra_users + ) + elif stream_name == "push_rules": + self.notifier.on_new_event( + "push_rules_key", token, users=[row.user_id for row in rows], ) - elif stream_name == "push_rules": - self.notifier.on_new_event( - "push_rules_key", token, users=[row.user_id for row in rows], - ) - elif stream_name in ("account_data", "tag_account_data",): - self.notifier.on_new_event( - "account_data_key", token, users=[row.user_id for row in rows], - ) - elif stream_name == "receipts": - self.notifier.on_new_event( - "receipt_key", token, rooms=[row.room_id for row in rows], - ) - elif stream_name == "typing": - self.typing_handler.process_replication_rows(token, rows) - self.notifier.on_new_event( - "typing_key", token, rooms=[row.room_id for row in rows], - ) - elif stream_name == "to_device": - entities = [row.entity for row in rows if row.entity.startswith("@")] - if entities: + elif stream_name in ("account_data", "tag_account_data",): self.notifier.on_new_event( - "to_device_key", token, users=entities, + "account_data_key", token, users=[row.user_id for row in rows], ) - elif stream_name == "device_lists": - all_room_ids = set() - for row in rows: - room_ids = yield self.store.get_rooms_for_user(row.user_id) - all_room_ids.update(room_ids) - self.notifier.on_new_event( - "device_list_key", token, rooms=all_room_ids, - ) - elif stream_name == "presence": - yield self.presence_handler.process_replication_rows(token, rows) - elif stream_name == "receipts": - self.notifier.on_new_event( - "groups_key", token, users=[row.user_id for row in rows], - ) + elif stream_name == "receipts": + self.notifier.on_new_event( + "receipt_key", token, rooms=[row.room_id for row in rows], + ) + elif stream_name == "typing": + self.typing_handler.process_replication_rows(token, rows) + self.notifier.on_new_event( + "typing_key", token, rooms=[row.room_id for row in rows], + ) + elif stream_name == "to_device": + entities = [row.entity for row in rows if row.entity.startswith("@")] + if entities: + self.notifier.on_new_event( + "to_device_key", token, users=entities, + ) + elif stream_name == "device_lists": + all_room_ids = set() + for row in rows: + room_ids = yield self.store.get_rooms_for_user(row.user_id) + all_room_ids.update(room_ids) + self.notifier.on_new_event( + "device_list_key", token, rooms=all_room_ids, + ) + elif stream_name == "presence": + yield self.presence_handler.process_replication_rows(token, rows) + elif stream_name == "receipts": + self.notifier.on_new_event( + "groups_key", token, users=[row.user_id for row in rows], + ) + except Exception: + logger.exception("Error processing replication") def start(config_options): diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index 5f845e80d1..5ba7e9b416 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -39,10 +39,10 @@ from synapse.storage.engines import create_engine from synapse.storage.user_directory import UserDirectoryStore from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, preserve_fn +from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from 
synapse.util.versionstring import get_version_string -from twisted.internet import reactor +from twisted.internet import reactor, defer from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.user_dir") @@ -164,7 +164,14 @@ class UserDirectoryReplicationHandler(ReplicationClientHandler): stream_name, token, rows ) if stream_name == "current_state_deltas": - preserve_fn(self.user_directory.notify_new_event)() + run_in_background(self._notify_directory) + + @defer.inlineCallbacks + def _notify_directory(self): + try: + yield self.user_directory.notify_new_event() + except Exception: + logger.exception("Error notifiying user directory of state update") def start(config_options): diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 6da315473d..dfc8d1b42e 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -176,17 +176,20 @@ class _TransactionController(object): @defer.inlineCallbacks def _start_recoverer(self, service): - yield self.store.set_appservice_state( - service, - ApplicationServiceState.DOWN - ) - logger.info( - "Application service falling behind. Starting recoverer. AS ID %s", - service.id - ) - recoverer = self.recoverer_fn(service, self.on_recovered) - self.add_recoverers([recoverer]) - recoverer.recover() + try: + yield self.store.set_appservice_state( + service, + ApplicationServiceState.DOWN + ) + logger.info( + "Application service falling behind. Starting recoverer. AS ID %s", + service.id + ) + recoverer = self.recoverer_fn(service, self.on_recovered) + self.add_recoverers([recoverer]) + recoverer.recover() + except Exception: + logger.exception("Error starting AS recoverer") @defer.inlineCallbacks def _is_service_up(self, service): diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index fce83d445f..32cbddbc53 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -146,53 +146,56 @@ class Keyring(object): verify_requests (List[VerifyKeyRequest]): """ - # create a deferred for each server we're going to look up the keys - # for; we'll resolve them once we have completed our lookups. - # These will be passed into wait_for_previous_lookups to block - # any other lookups until we have finished. - # The deferreds are called with no logcontext. - server_to_deferred = { - rq.server_name: defer.Deferred() - for rq in verify_requests - } - - # We want to wait for any previous lookups to complete before - # proceeding. - yield self.wait_for_previous_lookups( - [rq.server_name for rq in verify_requests], - server_to_deferred, - ) - - # Actually start fetching keys. - self._get_server_verify_keys(verify_requests) - - # When we've finished fetching all the keys for a given server_name, - # resolve the deferred passed to `wait_for_previous_lookups` so that - # any lookups waiting will proceed. 
- # - # map from server name to a set of request ids - server_to_request_ids = {} - - for verify_request in verify_requests: - server_name = verify_request.server_name - request_id = id(verify_request) - server_to_request_ids.setdefault(server_name, set()).add(request_id) - - def remove_deferreds(res, verify_request): - server_name = verify_request.server_name - request_id = id(verify_request) - server_to_request_ids[server_name].discard(request_id) - if not server_to_request_ids[server_name]: - d = server_to_deferred.pop(server_name, None) - if d: - d.callback(None) - return res - - for verify_request in verify_requests: - verify_request.deferred.addBoth( - remove_deferreds, verify_request, + try: + # create a deferred for each server we're going to look up the keys + # for; we'll resolve them once we have completed our lookups. + # These will be passed into wait_for_previous_lookups to block + # any other lookups until we have finished. + # The deferreds are called with no logcontext. + server_to_deferred = { + rq.server_name: defer.Deferred() + for rq in verify_requests + } + + # We want to wait for any previous lookups to complete before + # proceeding. + yield self.wait_for_previous_lookups( + [rq.server_name for rq in verify_requests], + server_to_deferred, ) + # Actually start fetching keys. + self._get_server_verify_keys(verify_requests) + + # When we've finished fetching all the keys for a given server_name, + # resolve the deferred passed to `wait_for_previous_lookups` so that + # any lookups waiting will proceed. + # + # map from server name to a set of request ids + server_to_request_ids = {} + + for verify_request in verify_requests: + server_name = verify_request.server_name + request_id = id(verify_request) + server_to_request_ids.setdefault(server_name, set()).add(request_id) + + def remove_deferreds(res, verify_request): + server_name = verify_request.server_name + request_id = id(verify_request) + server_to_request_ids[server_name].discard(request_id) + if not server_to_request_ids[server_name]: + d = server_to_deferred.pop(server_name, None) + if d: + d.callback(None) + return res + + for verify_request in verify_requests: + verify_request.deferred.addBoth( + remove_deferreds, verify_request, + ) + except Exception: + logger.exception("Error starting key lookups") + @defer.inlineCallbacks def wait_for_previous_lookups(self, server_names, server_to_deferred): """Waits for any previous key lookups for the given servers to finish. 
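The bookkeeping being wrapped in the keyring hunk above is essentially a
per-server reference count: each server's deferred fires only when the last
outstanding verify request for that server has completed, which is what lets
a later batch blocked in wait_for_previous_lookups proceed. A standalone
sketch of that idea, under illustrative names rather than the patch's own:

    from twisted.internet import defer

    # one deferred per server, fired when its last outstanding request is done
    outstanding = {"example.org": set([1, 2])}
    waiters = {"example.org": defer.Deferred()}

    def request_finished(server_name, request_id):
        # drop this request; fire the waiter once the set empties
        outstanding[server_name].discard(request_id)
        if not outstanding[server_name]:
            d = waiters.pop(server_name, None)
            if d:
                d.callback(None)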
diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 963d938edd..ded2b1871a 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -323,6 +323,8 @@ class TransactionQueue(object): break yield self._process_presence_inner(states_map.values()) + except Exception: + logger.exception("Error sending presence states to servers") finally: self._processing_pending_presence = False diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index ff0656df3e..19d09f5422 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -25,7 +25,7 @@ from synapse.http.servlet import ( ) from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.versionstring import get_version_string -from synapse.util.logcontext import preserve_fn +from synapse.util.logcontext import run_in_background from synapse.types import ThirdPartyInstanceID, get_domain_from_id import functools @@ -152,11 +152,18 @@ class Authenticator(object): # alive retry_timings = yield self.store.get_destination_retry_timings(origin) if retry_timings and retry_timings["retry_last_ts"]: - logger.info("Marking origin %r as up", origin) - preserve_fn(self.store.set_destination_retry_timings)(origin, 0, 0) + run_in_background(self._reset_retry_timings, origin) defer.returnValue(origin) + @defer.inlineCallbacks + def _reset_retry_timings(self, origin): + try: + logger.info("Marking origin %r as up", origin) + yield self.store.set_destination_retry_timings(origin, 0, 0) + except Exception: + logger.exception("Error resetting retry timings on %s", origin) + class BaseFederationServlet(object): REQUIRE_AUTH = True diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index 1fb709e6c3..7187df2508 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -165,28 +165,32 @@ class GroupAttestionRenewer(object): @defer.inlineCallbacks def _renew_attestation(group_id, user_id): - if not self.is_mine_id(group_id): - destination = get_domain_from_id(group_id) - elif not self.is_mine_id(user_id): - destination = get_domain_from_id(user_id) - else: - logger.warn( - "Incorrectly trying to do attestations for user: %r in %r", - user_id, group_id, + try: + if not self.is_mine_id(group_id): + destination = get_domain_from_id(group_id) + elif not self.is_mine_id(user_id): + destination = get_domain_from_id(user_id) + else: + logger.warn( + "Incorrectly trying to do attestations for user: %r in %r", + user_id, group_id, + ) + yield self.store.remove_attestation_renewal(group_id, user_id) + return + + attestation = self.attestations.create_attestation(group_id, user_id) + + yield self.transport_client.renew_group_attestation( + destination, group_id, user_id, + content={"attestation": attestation}, ) - yield self.store.remove_attestation_renewal(group_id, user_id) - return - - attestation = self.attestations.create_attestation(group_id, user_id) - yield self.transport_client.renew_group_attestation( - destination, group_id, user_id, - content={"attestation": attestation}, - ) - - yield self.store.update_attestation_renewal( - group_id, user_id, attestation - ) + yield self.store.update_attestation_renewal( + group_id, user_id, attestation + ) + except Exception: + logger.exception("Error renewing attestation of %r in %r", + user_id, group_id) for row in rows: group_id = row["group_id"] diff --git a/synapse/handlers/message.py 
b/synapse/handlers/message.py index 21628a8540..d168ff5b86 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -857,15 +857,25 @@ class EventCreationHandler(object): @defer.inlineCallbacks def _notify(): yield run_on_reactor() - self.notifier.on_new_room_event( - event, event_stream_id, max_stream_id, - extra_users=extra_users - ) + try: + self.notifier.on_new_room_event( + event, event_stream_id, max_stream_id, + extra_users=extra_users + ) + except Exception: + logger.exception("Error notifying about new room event") preserve_fn(_notify)() if event.type == EventTypes.Message: - presence = self.hs.get_presence_handler() # We don't want to block sending messages on any presence code. This # matters as sometimes presence code can take a while. - preserve_fn(presence.bump_presence_active_time)(requester.user) + run_in_background(self._bump_active_time, requester.user) + + @defer.inlineCallbacks + def _bump_active_time(self, user): + try: + presence = self.hs.get_presence_handler() + yield presence.bump_presence_active_time(user) + except Exception: + logger.exception("Error bumping presence active time") diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index a5e501897c..585f3e4da2 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -31,7 +31,7 @@ from synapse.storage.presence import UserPresenceState from synapse.util.caches.descriptors import cachedInlineCallbacks from synapse.util.async import Linearizer -from synapse.util.logcontext import preserve_fn +from synapse.util.logcontext import run_in_background from synapse.util.logutils import log_function from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer @@ -254,6 +254,14 @@ class PresenceHandler(object): logger.info("Finished _persist_unpersisted_changes") + @defer.inlineCallbacks + def _update_states_and_catch_exception(self, new_states): + try: + res = yield self._update_states(new_states) + defer.returnValue(res) + except Exception: + logger.exception("Error updating presence") + @defer.inlineCallbacks def _update_states(self, new_states): """Updates presence of users. Sets the appropriate timeouts. Pokes @@ -364,7 +372,7 @@ class PresenceHandler(object): now=now, ) - preserve_fn(self._update_states)(changes) + run_in_background(self._update_states_and_catch_exception, changes) except Exception: logger.exception("Exception in _handle_timeouts loop") @@ -422,20 +430,23 @@ class PresenceHandler(object): @defer.inlineCallbacks def _end(): - if affect_presence: + try: self.user_to_num_current_syncs[user_id] -= 1 prev_state = yield self.current_state_for_user(user_id) yield self._update_states([prev_state.copy_and_replace( last_user_sync_ts=self.clock.time_msec(), )]) + except Exception: + logger.exception("Error updating presence after sync") @contextmanager def _user_syncing(): try: yield finally: - preserve_fn(_end)() + if affect_presence: + run_in_background(_end) defer.returnValue(_user_syncing()) diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index 3f215c2b4e..2e0672161c 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -135,37 +135,40 @@ class ReceiptsHandler(BaseHandler): """Given a list of receipts, works out which remote servers should be poked and pokes them. """ - # TODO: Some of this stuff should be coallesced. 
- for receipt in receipts: - room_id = receipt["room_id"] - receipt_type = receipt["receipt_type"] - user_id = receipt["user_id"] - event_ids = receipt["event_ids"] - data = receipt["data"] - - users = yield self.state.get_current_user_in_room(room_id) - remotedomains = set(get_domain_from_id(u) for u in users) - remotedomains = remotedomains.copy() - remotedomains.discard(self.server_name) - - logger.debug("Sending receipt to: %r", remotedomains) - - for domain in remotedomains: - self.federation.send_edu( - destination=domain, - edu_type="m.receipt", - content={ - room_id: { - receipt_type: { - user_id: { - "event_ids": event_ids, - "data": data, + try: + # TODO: Some of this stuff should be coallesced. + for receipt in receipts: + room_id = receipt["room_id"] + receipt_type = receipt["receipt_type"] + user_id = receipt["user_id"] + event_ids = receipt["event_ids"] + data = receipt["data"] + + users = yield self.state.get_current_user_in_room(room_id) + remotedomains = set(get_domain_from_id(u) for u in users) + remotedomains = remotedomains.copy() + remotedomains.discard(self.server_name) + + logger.debug("Sending receipt to: %r", remotedomains) + + for domain in remotedomains: + self.federation.send_edu( + destination=domain, + edu_type="m.receipt", + content={ + room_id: { + receipt_type: { + user_id: { + "event_ids": event_ids, + "data": data, + } } - } + }, }, - }, - key=(room_id, receipt_type, user_id), - ) + key=(room_id, receipt_type, user_id), + ) + except Exception: + logger.exception("Error pushing receipts to remote servers") @defer.inlineCallbacks def get_receipts_for_room(self, room_id, to_key): diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 77c0cf146f..823e2e27e1 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -205,28 +205,31 @@ class TypingHandler(object): @defer.inlineCallbacks def _push_remote(self, member, typing): - users = yield self.state.get_current_user_in_room(member.room_id) - self._member_last_federation_poke[member] = self.clock.time_msec() + try: + users = yield self.state.get_current_user_in_room(member.room_id) + self._member_last_federation_poke[member] = self.clock.time_msec() - now = self.clock.time_msec() - self.wheel_timer.insert( - now=now, - obj=member, - then=now + FEDERATION_PING_INTERVAL, - ) + now = self.clock.time_msec() + self.wheel_timer.insert( + now=now, + obj=member, + then=now + FEDERATION_PING_INTERVAL, + ) - for domain in set(get_domain_from_id(u) for u in users): - if domain != self.server_name: - self.federation.send_edu( - destination=domain, - edu_type="m.typing", - content={ - "room_id": member.room_id, - "user_id": member.user_id, - "typing": typing, - }, - key=member, - ) + for domain in set(get_domain_from_id(u) for u in users): + if domain != self.server_name: + self.federation.send_edu( + destination=domain, + edu_type="m.typing", + content={ + "room_id": member.room_id, + "user_id": member.user_id, + "typing": typing, + }, + key=member, + ) + except Exception: + logger.exception("Error pushing typing notif to remotes") @defer.inlineCallbacks def _recv_edu(self, origin, content): diff --git a/synapse/notifier.py b/synapse/notifier.py index 0e40a4aad6..939723a404 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -21,7 +21,7 @@ from synapse.handlers.presence import format_user_presence_state from synapse.util import DeferredTimedOutError from synapse.util.logutils import log_function from synapse.util.async import ObservableDeferred -from synapse.util.logcontext 
import PreserveLoggingContext, preserve_fn +from synapse.util.logcontext import PreserveLoggingContext, run_in_background from synapse.util.metrics import Measure from synapse.types import StreamToken from synapse.visibility import filter_events_for_client @@ -251,9 +251,7 @@ class Notifier(object): def _on_new_room_event(self, event, room_stream_id, extra_users=[]): """Notify any user streams that are interested in this room event""" # poke any interested application service. - preserve_fn(self.appservice_handler.notify_interested_services)( - room_stream_id - ) + run_in_background(self._notify_app_services, room_stream_id) if self.federation_sender: self.federation_sender.notify_new_events(room_stream_id) @@ -267,6 +265,13 @@ class Notifier(object): rooms=[event.room_id], ) + @defer.inlineCallbacks + def _notify_app_services(self, room_stream_id): + try: + yield self.appservice_handler.notify_interested_services(room_stream_id) + except Exception: + logger.exception("Error notifying application services of event") + def on_new_event(self, stream_key, new_token, users=[], rooms=[]): """ Used to inform listeners that something has happend event wise. diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index 58df98a793..ba7286cb72 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -77,10 +77,13 @@ class EmailPusher(object): @defer.inlineCallbacks def on_started(self): if self.mailer is not None: - self.throttle_params = yield self.store.get_throttle_params_by_room( - self.pusher_id - ) - yield self._process() + try: + self.throttle_params = yield self.store.get_throttle_params_by_room( + self.pusher_id + ) + yield self._process() + except Exception: + logger.exception("Error starting email pusher") def on_stop(self): if self.timed_call: diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 2cbac571b8..1420d378ef 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -94,7 +94,10 @@ class HttpPusher(object): @defer.inlineCallbacks def on_started(self): - yield self._process() + try: + yield self._process() + except Exception: + logger.exception("Error starting http pusher") @defer.inlineCallbacks def on_new_notifications(self, min_stream_ordering, max_stream_ordering): diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py index c188192f2b..0252afd9d3 100644 --- a/synapse/rest/media/v1/storage_provider.py +++ b/synapse/rest/media/v1/storage_provider.py @@ -18,7 +18,7 @@ from twisted.internet import defer, threads from .media_storage import FileResponder from synapse.config._base import Config -from synapse.util.logcontext import preserve_fn +from synapse.util.logcontext import run_in_background import logging import os @@ -87,7 +87,12 @@ class StorageProviderWrapper(StorageProvider): return self.backend.store_file(path, file_info) else: # TODO: Handle errors. 
- preserve_fn(self.backend.store_file)(path, file_info) + def store(): + try: + return self.backend.store_file(path, file_info) + except Exception: + logger.exception("Error storing file") + run_in_background(store) return defer.succeed(None) def fetch(self, path, file_info): diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index e78f8d0114..c22762eb5c 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -448,6 +448,7 @@ class EventPushActionsWorkerStore(SQLBaseStore): "add_push_actions_to_staging", _add_push_actions_to_staging_txn ) + @defer.inlineCallbacks def remove_push_actions_from_staging(self, event_id): """Called if we failed to persist the event to ensure that stale push actions don't build up in the DB @@ -456,13 +457,22 @@ class EventPushActionsWorkerStore(SQLBaseStore): event_id (str) """ - return self._simple_delete( - table="event_push_actions_staging", - keyvalues={ - "event_id": event_id, - }, - desc="remove_push_actions_from_staging", - ) + try: + res = yield self._simple_delete( + table="event_push_actions_staging", + keyvalues={ + "event_id": event_id, + }, + desc="remove_push_actions_from_staging", + ) + defer.returnValue(res) + except Exception: + # this method is called from an exception handler, so propagating + # another exception here really isn't helpful - there's nothing + # the caller can do about it. Just log the exception and move on. + logger.exception( + "Error removing push actions after event persistence failure", + ) @defer.inlineCallbacks def _find_stream_orderings_for_times(self): diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index d59adc236e..d6587e4409 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -305,7 +305,12 @@ def run_in_background(f, *args, **kwargs): deferred returned by the funtion completes. Useful for wrapping functions that return a deferred which you don't yield - on. + on (for instance because you want to pass it to deferred.gatherResults()). + + Note that if you completely discard the result, you should make sure that + `f` doesn't raise any deferred exceptions, otherwise a scary-looking + CRITICAL error about an unhandled error will be logged without much + indication about where it came from. """ current = LoggingContext.current_context() res = f(*args, **kwargs) -- cgit 1.5.1 From 13843f771ea64cfd4fb7e2f03854b9506f63fcfd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 27 Apr 2018 12:17:13 +0100 Subject: Trap exceptions thrown within run_in_background Turn any exceptions that get thrown synchronously within run_in_background into Failures instead. --- synapse/util/logcontext.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'synapse/util') diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index d59adc236e..127581a0b4 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -308,7 +308,13 @@ def run_in_background(f, *args, **kwargs): on. """ current = LoggingContext.current_context() - res = f(*args, **kwargs) + try: + res = f(*args, **kwargs) + except: # noqa: E722 + # the assumption here is that the caller doesn't want to be disturbed + # by synchronous exceptions, so let's turn them into Failures. + return defer.fail() + if isinstance(res, defer.Deferred) and not res.called: # The function will have reset the context before returning, so # we need to restore it now. 
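The upshot of the change above is that a function which raises before it ever
returns a deferred now looks to its caller like one that failed
asynchronously: run_in_background hands back an already-failed Deferred
instead of letting the exception escape the call. An illustrative example of
the behaviour after this change (not part of the patch):

    from synapse.util.logcontext import run_in_background


    def broken():
        # raises synchronously, before any deferred is created
        raise ValueError("boom")


    d = run_in_background(broken)
    # d is an already-failed Deferred, so the caller can attach an errback
    # as usual instead of wrapping the call site in try/except
    d.addErrback(lambda f: f.trap(ValueError))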
-- cgit 1.5.1 From 9d2c1b8429996cb9766ac636034485e2a1d685cc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 27 Apr 2018 12:52:30 +0100 Subject: Backport deferred.addTimeout Twisted 16.0 doesn't have addTimeout, so let's backport it. --- synapse/http/__init__.py | 2 +- synapse/http/client.py | 6 ++- synapse/http/matrixfederationclient.py | 7 ++-- synapse/notifier.py | 22 +++++++---- synapse/util/async.py | 67 ++++++++++++++++++++++++++++++++++ 5 files changed, 90 insertions(+), 14 deletions(-) (limited to 'synapse/util') diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py index 20e568bc43..0d47ccdb59 100644 --- a/synapse/http/__init__.py +++ b/synapse/http/__init__.py @@ -28,7 +28,7 @@ class RequestTimedOutError(SynapseError): def cancelled_to_request_timed_out_error(value): """Turns CancelledErrors into RequestTimedOutErrors. - For use with deferred.addTimeout() + For use with async.add_timeout_to_deferred """ if isinstance(value, failure.Failure): value.trap(CancelledError) diff --git a/synapse/http/client.py b/synapse/http/client.py index 35c8d51e71..62309c3365 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -20,6 +20,7 @@ from synapse.api.errors import ( CodeMessageException, MatrixCodeMessageException, SynapseError, Codes, ) from synapse.http import cancelled_to_request_timed_out_error +from synapse.util.async import add_timeout_to_deferred from synapse.util.caches import CACHE_SIZE_FACTOR from synapse.util.logcontext import make_deferred_yieldable import synapse.metrics @@ -102,8 +103,9 @@ class SimpleHttpClient(object): request_deferred = self.agent.request( method, uri, *args, **kwargs ) - request_deferred.addTimeout( - 60, reactor, cancelled_to_request_timed_out_error, + add_timeout_to_deferred( + request_deferred, + 60, cancelled_to_request_timed_out_error, ) response = yield make_deferred_yieldable(request_deferred) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index fe4b1636a6..30036fe81c 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -22,7 +22,7 @@ from twisted.web._newclient import ResponseDone from synapse.http import cancelled_to_request_timed_out_error from synapse.http.endpoint import matrix_federation_endpoint import synapse.metrics -from synapse.util.async import sleep +from synapse.util.async import sleep, add_timeout_to_deferred from synapse.util import logcontext from synapse.util.logcontext import make_deferred_yieldable import synapse.util.retryutils @@ -193,9 +193,10 @@ class MatrixFederationHttpClient(object): Headers(headers_dict), producer ) - request_deferred.addTimeout( + add_timeout_to_deferred( + request_deferred, timeout / 1000. if timeout else 60, - reactor, cancelled_to_request_timed_out_error, + cancelled_to_request_timed_out_error, ) response = yield make_deferred_yieldable( request_deferred, diff --git a/synapse/notifier.py b/synapse/notifier.py index 1e4f78b993..a1c06e8fca 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -13,15 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from twisted.internet import defer, reactor
-from twisted.internet.defer import TimeoutError
+from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import AuthError
 from synapse.handlers.presence import format_user_presence_state
 
 from synapse.util.logutils import log_function
-from synapse.util.async import ObservableDeferred
+from synapse.util.async import (
+    ObservableDeferred, add_timeout_to_deferred,
+    DeferredTimeoutError,
+)
 from synapse.util.logcontext import PreserveLoggingContext, preserve_fn
 from synapse.util.metrics import Measure
 from synapse.types import StreamToken
@@ -332,8 +334,9 @@ class Notifier(object):
                     # Now we wait for the _NotifierUserStream to be told there
                     # is a new token.
                     listener = user_stream.new_listener(prev_token)
-                    listener.deferred.addTimeout(
-                        (end_time - now) / 1000., reactor,
+                    add_timeout_to_deferred(
+                        listener.deferred,
+                        (end_time - now) / 1000.,
                     )
                     with PreserveLoggingContext():
                         yield listener.deferred
@@ -347,7 +350,7 @@ class Notifier(object):
                     # Update the prev_token to the current_token since nothing
                     # has happened between the old prev_token and the current_token
                     prev_token = current_token
-                except TimeoutError:
+                except DeferredTimeoutError:
                     break
                 except defer.CancelledError:
                     break
@@ -552,11 +555,14 @@ class Notifier(object):
                 if end_time <= now:
                     break
 
-                listener.deferred.addTimeout((end_time - now) / 1000., reactor)
+                add_timeout_to_deferred(
+                    listener.deferred,
+                    (end_time - now) / 1000.,
+                )
                 try:
                     with PreserveLoggingContext():
                         yield listener.deferred
-                except TimeoutError:
+                except DeferredTimeoutError:
                     break
                 except defer.CancelledError:
                     break
diff --git a/synapse/util/async.py b/synapse/util/async.py
index 0729bb2863..1df5c5600c 100644
--- a/synapse/util/async.py
+++ b/synapse/util/async.py
@@ -15,6 +15,8 @@
 
 from twisted.internet import defer, reactor
+from twisted.internet.defer import CancelledError
+from twisted.python import failure
 
 from .logcontext import (
     PreserveLoggingContext, make_deferred_yieldable, preserve_fn
@@ -392,3 +394,68 @@ class ReadWriteLock(object):
                 self.key_to_current_writer.pop(key)
 
         defer.returnValue(_ctx_manager())
+
+
+class DeferredTimeoutError(Exception):
+    """
+    This error is raised by default when a L{Deferred} times out.
+    """
+
+
+def add_timeout_to_deferred(deferred, timeout, on_timeout_cancel=None):
+    """
+    Add a timeout to a deferred by scheduling it to be cancelled after
+    timeout seconds.
+
+    This is essentially a backport of deferred.addTimeout, which was introduced
+    in twisted 16.5.
+
+    If the deferred gets timed out, it errbacks with a DeferredTimeoutError,
+    unless a cancelable function was passed to its initialization or unless
+    a different on_timeout_cancel callable is provided.
+
+    Args:
+        deferred (defer.Deferred): deferred to be timed out
+        timeout (Number): seconds to time out after
+
+        on_timeout_cancel (callable): A callable which is called immediately
+            after the deferred times out, and not if this deferred is
+            otherwise cancelled before the timeout.
+
+            It takes an arbitrary value, which is the value of the deferred at
+            that exact point in time (probably a CancelledError Failure), and
+            the timeout.
+
+            The default callable (if none is provided) will translate a
+            CancelledError Failure into a DeferredTimeoutError.
+ """ + timed_out = [False] + + def time_it_out(): + timed_out[0] = True + deferred.cancel() + + delayed_call = reactor.callLater(timeout, time_it_out) + + def convert_cancelled(value): + if timed_out[0]: + to_call = on_timeout_cancel or _cancelled_to_timed_out_error + return to_call(value, timeout) + return value + + deferred.addBoth(convert_cancelled) + + def cancel_timeout(result): + # stop the pending call to cancel the deferred if it's been fired + if delayed_call.active(): + delayed_call.cancel() + return result + + deferred.addBoth(cancel_timeout) + + +def _cancelled_to_timed_out_error(value, timeout): + if isinstance(value, failure.Failure): + value.trap(CancelledError) + raise DeferredTimeoutError(timeout, "Deferred") + return value -- cgit 1.5.1 From 2a13af23bc0561ab48e0a90528231c40ee209724 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 27 Apr 2018 11:29:27 +0100 Subject: Use run_in_background in preference to preserve_fn While I was going through uses of preserve_fn for other PRs, I converted places which only use the wrapped function once to use run_in_background, to avoid creating the function object. --- synapse/app/federation_sender.py | 4 ++-- synapse/app/pusher.py | 4 ++-- synapse/app/synchrotron.py | 5 ++--- synapse/appservice/scheduler.py | 12 ++++++------ synapse/crypto/keyring.py | 28 ++++++++++++++++----------- synapse/federation/federation_client.py | 5 +++-- synapse/groups/attestations.py | 4 ++-- synapse/handlers/appservice.py | 5 ++++- synapse/handlers/e2e_keys.py | 6 +++--- synapse/handlers/federation.py | 16 +++++++++------ synapse/handlers/initial_sync.py | 12 +++++++----- synapse/handlers/message.py | 5 +++-- synapse/handlers/typing.py | 7 ++++--- synapse/push/pusherpool.py | 20 +++++++++++-------- synapse/rest/media/v1/preview_url_resource.py | 5 +++-- synapse/storage/events_worker.py | 5 +++-- synapse/storage/stream.py | 5 +++-- synapse/util/async.py | 4 ++-- synapse/util/file_consumer.py | 6 ++++-- synapse/util/logcontext.py | 2 +- synapse/util/ratelimitutils.py | 4 ++-- synapse/util/retryutils.py | 4 ++-- 22 files changed, 97 insertions(+), 71 deletions(-) (limited to 'synapse/util') diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index 0cc3331519..c6daa0d43f 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -38,7 +38,7 @@ from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.util.async import Linearizer from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, preserve_fn +from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string from twisted.internet import defer, reactor @@ -229,7 +229,7 @@ class FederationSenderHandler(object): # presence, typing, etc. 
if stream_name == "federation": send_queue.process_rows_for_federation(self.federation_sender, rows) - preserve_fn(self.update_token)(token) + run_in_background(self.update_token, token) # We also need to poke the federation sender when new events happen elif stream_name == "events": diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index d5c3a85195..8bd5c0c2b7 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -33,7 +33,7 @@ from synapse.server import HomeServer from synapse.storage import DataStore from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, preserve_fn +from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string from twisted.internet import defer, reactor @@ -140,7 +140,7 @@ class PusherReplicationHandler(ReplicationClientHandler): def on_rdata(self, stream_name, token, rows): super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows) - preserve_fn(self.poke_pushers)(stream_name, token, rows) + run_in_background(self.poke_pushers, stream_name, token, rows) @defer.inlineCallbacks def poke_pushers(self, stream_name, token, rows): diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 2fddcd935a..0c4ccc58bc 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -51,7 +51,7 @@ from synapse.storage.engines import create_engine from synapse.storage.presence import UserPresenceState from synapse.storage.roommember import RoomMemberStore from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, preserve_fn +from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.stringutils import random_string from synapse.util.versionstring import get_version_string @@ -327,8 +327,7 @@ class SyncReplicationHandler(ReplicationClientHandler): def on_rdata(self, stream_name, token, rows): super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows) - - preserve_fn(self.process_and_notify)(stream_name, token, rows) + run_in_background(self.process_and_notify, stream_name, token, rows) def get_streams_to_replicate(self): args = super(SyncReplicationHandler, self).get_streams_to_replicate() diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 6da315473d..ba1631b5c8 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -51,7 +51,7 @@ components. 
from twisted.internet import defer from synapse.appservice import ApplicationServiceState -from synapse.util.logcontext import preserve_fn +from synapse.util.logcontext import run_in_background from synapse.util.metrics import Measure import logging @@ -106,7 +106,7 @@ class _ServiceQueuer(object): def enqueue(self, service, event): # if this service isn't being sent something self.queued_events.setdefault(service.id, []).append(event) - preserve_fn(self._send_request)(service) + run_in_background(self._send_request, service) @defer.inlineCallbacks def _send_request(self, service): @@ -152,10 +152,10 @@ class _TransactionController(object): if sent: yield txn.complete(self.store) else: - preserve_fn(self._start_recoverer)(service) - except Exception as e: - logger.exception(e) - preserve_fn(self._start_recoverer)(service) + run_in_background(self._start_recoverer, service) + except Exception: + logger.exception("Error creating appservice transaction") + run_in_background(self._start_recoverer, service) @defer.inlineCallbacks def on_recovered(self, recoverer): diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index fce83d445f..38944a7326 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -19,7 +19,8 @@ from synapse.api.errors import SynapseError, Codes from synapse.util import unwrapFirstError, logcontext from synapse.util.logcontext import ( PreserveLoggingContext, - preserve_fn + preserve_fn, + run_in_background, ) from synapse.util.metrics import Measure @@ -127,7 +128,7 @@ class Keyring(object): verify_requests.append(verify_request) - preserve_fn(self._start_key_lookups)(verify_requests) + run_in_background(self._start_key_lookups, verify_requests) # Pass those keys to handle_key_deferred so that the json object # signatures can be verified @@ -313,7 +314,7 @@ class Keyring(object): if not verify_request.deferred.called: verify_request.deferred.errback(err) - preserve_fn(do_iterations)().addErrback(on_err) + run_in_background(do_iterations).addErrback(on_err) @defer.inlineCallbacks def get_keys_from_store(self, server_name_and_key_ids): @@ -329,8 +330,9 @@ class Keyring(object): """ res = yield logcontext.make_deferred_yieldable(defer.gatherResults( [ - preserve_fn(self.store.get_server_verify_keys)( - server_name, key_ids + run_in_background( + self.store.get_server_verify_keys, + server_name, key_ids, ).addCallback(lambda ks, server: (server, ks), server_name) for server_name, key_ids in server_name_and_key_ids ], @@ -358,7 +360,7 @@ class Keyring(object): results = yield logcontext.make_deferred_yieldable(defer.gatherResults( [ - preserve_fn(get_key)(p_name, p_keys) + run_in_background(get_key, p_name, p_keys) for p_name, p_keys in self.perspective_servers.items() ], consumeErrors=True, @@ -398,7 +400,7 @@ class Keyring(object): results = yield logcontext.make_deferred_yieldable(defer.gatherResults( [ - preserve_fn(get_key)(server_name, key_ids) + run_in_background(get_key, server_name, key_ids) for server_name, key_ids in server_name_and_key_ids ], consumeErrors=True, @@ -481,7 +483,8 @@ class Keyring(object): yield logcontext.make_deferred_yieldable(defer.gatherResults( [ - preserve_fn(self.store_keys)( + run_in_background( + self.store_keys, server_name=server_name, from_server=perspective_name, verify_keys=response_keys, @@ -539,7 +542,8 @@ class Keyring(object): yield logcontext.make_deferred_yieldable(defer.gatherResults( [ - preserve_fn(self.store_keys)( + run_in_background( + self.store_keys, server_name=key_server_name, 
from_server=server_name, verify_keys=verify_keys, @@ -615,7 +619,8 @@ class Keyring(object): yield logcontext.make_deferred_yieldable(defer.gatherResults( [ - preserve_fn(self.store.store_server_keys_json)( + run_in_background( + self.store.store_server_keys_json, server_name=server_name, key_id=key_id, from_server=server_name, @@ -716,7 +721,8 @@ class Keyring(object): # TODO(markjh): Store whether the keys have expired. return logcontext.make_deferred_yieldable(defer.gatherResults( [ - preserve_fn(self.store.store_server_verify_key)( + run_in_background( + self.store.store_server_verify_key, server_name, server_name, key.time_added, key ) for key_id, key in verify_keys.items() diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 8e2c0c4cd2..8adc60863e 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -33,7 +33,7 @@ from synapse.federation.federation_base import ( import synapse.metrics from synapse.util import logcontext, unwrapFirstError from synapse.util.caches.expiringcache import ExpiringCache -from synapse.util.logcontext import make_deferred_yieldable, preserve_fn +from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.util.logutils import log_function from synapse.util.retryutils import NotRetryingDestination @@ -417,7 +417,8 @@ class FederationClient(FederationBase): batch = set(missing_events[i:i + batch_size]) deferreds = [ - preserve_fn(self.get_pdu)( + run_in_background( + self.get_pdu, destinations=random_server_list(), event_id=e_id, ) diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index 1fb709e6c3..5f53f17954 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -42,7 +42,7 @@ from twisted.internet import defer from synapse.api.errors import SynapseError from synapse.types import get_domain_from_id -from synapse.util.logcontext import preserve_fn +from synapse.util.logcontext import run_in_background from signedjson.sign import sign_json @@ -192,4 +192,4 @@ class GroupAttestionRenewer(object): group_id = row["group_id"] user_id = row["user_id"] - preserve_fn(_renew_attestation)(group_id, user_id) + run_in_background(_renew_attestation, group_id, user_id) diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 0245197c02..6cc2388306 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -198,7 +198,10 @@ class ApplicationServicesHandler(object): services = yield self._get_services_for_3pn(protocol) results = yield make_deferred_yieldable(defer.DeferredList([ - preserve_fn(self.appservice_api.query_3pe)(service, kind, protocol, fields) + run_in_background( + self.appservice_api.query_3pe, + service, kind, protocol, fields, + ) for service in services ], consumeErrors=True)) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 325c0c4a9f..fc958404a1 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -24,7 +24,7 @@ from synapse.api.errors import ( SynapseError, CodeMessageException, FederationDeniedError, ) from synapse.types import get_domain_from_id, UserID -from synapse.util.logcontext import preserve_fn, make_deferred_yieldable +from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.util.retryutils import NotRetryingDestination logger = logging.getLogger(__name__) @@ -139,7 +139,7 @@ class E2eKeysHandler(object): failures[destination] 
= _exception_to_failure(e) yield make_deferred_yieldable(defer.gatherResults([ - preserve_fn(do_remote_query)(destination) + run_in_background(do_remote_query, destination) for destination in remote_queries_not_in_cache ])) @@ -242,7 +242,7 @@ class E2eKeysHandler(object): failures[destination] = _exception_to_failure(e) yield make_deferred_yieldable(defer.gatherResults([ - preserve_fn(claim_client_keys)(destination) + run_in_background(claim_client_keys, destination) for destination in remote_queries ])) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index ae7e0d6da2..c66ca0f381 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -637,7 +637,8 @@ class FederationHandler(BaseHandler): results = yield logcontext.make_deferred_yieldable(defer.gatherResults( [ - logcontext.preserve_fn(self.replication_layer.get_pdu)( + logcontext.run_in_background( + self.replication_layer.get_pdu, [dest], event_id, outlier=True, @@ -1023,7 +1024,7 @@ class FederationHandler(BaseHandler): # lots of requests for missing prev_events which we do actually # have. Hence we fire off the deferred, but don't wait for it. - logcontext.preserve_fn(self._handle_queued_pdus)(room_queue) + logcontext.run_in_background(self._handle_queued_pdus, room_queue) defer.returnValue(True) @@ -1523,8 +1524,9 @@ class FederationHandler(BaseHandler): if not backfilled: # this intentionally does not yield: we don't care about the result # and don't need to wait for it. - logcontext.preserve_fn(self.pusher_pool.on_new_notifications)( - event_stream_id, max_stream_id + logcontext.run_in_background( + self.pusher_pool.on_new_notifications, + event_stream_id, max_stream_id, ) defer.returnValue((context, event_stream_id, max_stream_id)) @@ -1538,7 +1540,8 @@ class FederationHandler(BaseHandler): """ contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults( [ - logcontext.preserve_fn(self._prep_event)( + logcontext.run_in_background( + self._prep_event, origin, ev_info["event"], state=ev_info.get("state"), @@ -1867,7 +1870,8 @@ class FederationHandler(BaseHandler): different_events = yield logcontext.make_deferred_yieldable( defer.gatherResults([ - logcontext.preserve_fn(self.store.get_event)( + logcontext.run_in_background( + self.store.get_event, d, allow_none=True, allow_rejected=False, diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index c5267b4b84..cd33a86599 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -27,7 +27,7 @@ from synapse.types import ( from synapse.util import unwrapFirstError from synapse.util.async import concurrently_execute from synapse.util.caches.snapshot_cache import SnapshotCache -from synapse.util.logcontext import make_deferred_yieldable, preserve_fn +from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.visibility import filter_events_for_client from ._base import BaseHandler @@ -166,7 +166,8 @@ class InitialSyncHandler(BaseHandler): (messages, token), current_state = yield make_deferred_yieldable( defer.gatherResults( [ - preserve_fn(self.store.get_recent_events_for_room)( + run_in_background( + self.store.get_recent_events_for_room, event.room_id, limit=limit, end_token=room_end_token, @@ -391,9 +392,10 @@ class InitialSyncHandler(BaseHandler): presence, receipts, (messages, token) = yield defer.gatherResults( [ - preserve_fn(get_presence)(), - preserve_fn(get_receipts)(), - preserve_fn(self.store.get_recent_events_for_room)( 
+ run_in_background(get_presence), + run_in_background(get_receipts), + run_in_background( + self.store.get_recent_events_for_room, room_id, limit=limit, end_token=now_token.room_key, diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 21628a8540..244b98dd8d 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -850,7 +850,8 @@ class EventCreationHandler(object): # this intentionally does not yield: we don't care about the result # and don't need to wait for it. - preserve_fn(self.pusher_pool.on_new_notifications)( + run_in_background( + self.pusher_pool.on_new_notifications, event_stream_id, max_stream_id ) @@ -862,7 +863,7 @@ class EventCreationHandler(object): extra_users=extra_users ) - preserve_fn(_notify)() + run_in_background(_notify) if event.type == EventTypes.Message: presence = self.hs.get_presence_handler() diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 77c0cf146f..19cde70adf 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -16,7 +16,7 @@ from twisted.internet import defer from synapse.api.errors import SynapseError, AuthError -from synapse.util.logcontext import preserve_fn +from synapse.util.logcontext import run_in_background from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer from synapse.types import UserID, get_domain_from_id @@ -97,7 +97,8 @@ class TypingHandler(object): if self.hs.is_mine_id(member.user_id): last_fed_poke = self._member_last_federation_poke.get(member, None) if not last_fed_poke or last_fed_poke + FEDERATION_PING_INTERVAL <= now: - preserve_fn(self._push_remote)( + run_in_background( + self._push_remote, member=member, typing=True ) @@ -196,7 +197,7 @@ class TypingHandler(object): def _push_update(self, member, typing): if self.hs.is_mine_id(member.user_id): # Only send updates for changes to our own users. - preserve_fn(self._push_remote)(member, typing) + run_in_background(self._push_remote, member, typing) self._push_update_local( member=member, diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 134e89b371..7bb5733090 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -14,13 +14,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import logging + from twisted.internet import defer -from .pusher import PusherFactory -from synapse.util.logcontext import make_deferred_yieldable, preserve_fn +from synapse.push.pusher import PusherFactory from synapse.util.async import run_on_reactor - -import logging +from synapse.util.logcontext import make_deferred_yieldable, run_in_background logger = logging.getLogger(__name__) @@ -137,8 +137,9 @@ class PusherPool: if u in self.pushers: for p in self.pushers[u].values(): deferreds.append( - preserve_fn(p.on_new_notifications)( - min_stream_id, max_stream_id + run_in_background( + p.on_new_notifications, + min_stream_id, max_stream_id, ) ) @@ -164,7 +165,10 @@ class PusherPool: if u in self.pushers: for p in self.pushers[u].values(): deferreds.append( - preserve_fn(p.on_new_receipts)(min_stream_id, max_stream_id) + run_in_background( + p.on_new_receipts, + min_stream_id, max_stream_id, + ) ) yield make_deferred_yieldable(defer.gatherResults(deferreds)) @@ -207,7 +211,7 @@ class PusherPool: if appid_pushkey in byuser: byuser[appid_pushkey].on_stop() byuser[appid_pushkey] = p - preserve_fn(p.on_started)() + run_in_background(p.on_started) logger.info("Started pushers") diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 0fc21540c6..9290d7946f 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -35,7 +35,7 @@ from ._base import FileInfo from synapse.api.errors import ( SynapseError, Codes, ) -from synapse.util.logcontext import preserve_fn, make_deferred_yieldable +from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.util.stringutils import random_string from synapse.util.caches.expiringcache import ExpiringCache from synapse.http.client import SpiderHttpClient @@ -144,7 +144,8 @@ class PreviewUrlResource(Resource): observable = self._cache.get(url) if not observable: - download = preserve_fn(self._do_preview)( + download = run_in_background( + self._do_preview, url, requester.user, ts, ) observable = ObservableDeferred( diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index a937b9bceb..ba834854e1 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -20,7 +20,7 @@ from synapse.events import FrozenEvent from synapse.events.utils import prune_event from synapse.util.logcontext import ( - preserve_fn, PreserveLoggingContext, make_deferred_yieldable + PreserveLoggingContext, make_deferred_yieldable, run_in_background, ) from synapse.util.metrics import Measure from synapse.api.errors import SynapseError @@ -319,7 +319,8 @@ class EventsWorkerStore(SQLBaseStore): res = yield make_deferred_yieldable(defer.gatherResults( [ - preserve_fn(self._get_event_from_row)( + run_in_background( + self._get_event_from_row, row["internal_metadata"], row["json"], row["redacts"], rejected_reason=row["rejects"], ) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 2956c3b3e0..5b245a936c 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -41,7 +41,7 @@ from synapse.storage.events import EventsWorkerStore from synapse.util.caches.descriptors import cached from synapse.types import RoomStreamToken from synapse.util.caches.stream_change_cache import StreamChangeCache -from synapse.util.logcontext import make_deferred_yieldable, preserve_fn +from synapse.util.logcontext import make_deferred_yieldable, run_in_background from 
synapse.storage.engines import PostgresEngine, Sqlite3Engine import abc @@ -198,7 +198,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): room_ids = list(room_ids) for rm_ids in (room_ids[i:i + 20] for i in xrange(0, len(room_ids), 20)): res = yield make_deferred_yieldable(defer.gatherResults([ - preserve_fn(self.get_room_events_stream_for_room)( + run_in_background( + self.get_room_events_stream_for_room, room_id, from_key, to_key, limit, order=order, ) for room_id in rm_ids diff --git a/synapse/util/async.py b/synapse/util/async.py index 0729bb2863..bd07067328 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -17,7 +17,7 @@ from twisted.internet import defer, reactor from .logcontext import ( - PreserveLoggingContext, make_deferred_yieldable, preserve_fn + PreserveLoggingContext, make_deferred_yieldable, run_in_background ) from synapse.util import logcontext, unwrapFirstError @@ -161,7 +161,7 @@ def concurrently_execute(func, args, limit): pass return logcontext.make_deferred_yieldable(defer.gatherResults([ - preserve_fn(_concurrently_execute_inner)() + run_in_background(_concurrently_execute_inner) for _ in xrange(limit) ], consumeErrors=True)).addErrback(unwrapFirstError) diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py index 3c8a165331..3380970e4e 100644 --- a/synapse/util/file_consumer.py +++ b/synapse/util/file_consumer.py @@ -15,7 +15,7 @@ from twisted.internet import threads, reactor -from synapse.util.logcontext import make_deferred_yieldable, preserve_fn +from synapse.util.logcontext import make_deferred_yieldable, run_in_background from six.moves import queue @@ -70,7 +70,9 @@ class BackgroundFileConsumer(object): self._producer = producer self.streaming = streaming - self._finished_deferred = preserve_fn(threads.deferToThread)(self._writer) + self._finished_deferred = run_in_background( + threads.deferToThread, self._writer + ) if not streaming: self._producer.resumeProducing() diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index d59adc236e..c2edf87e58 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -341,7 +341,7 @@ def make_deferred_yieldable(deferred): returning a deferred. Then, when the deferred completes, restores the current logcontext before running callbacks/errbacks. - (This is more-or-less the opposite operation to preserve_fn.) + (This is more-or-less the opposite operation to run_in_background.) 
""" if isinstance(deferred, defer.Deferred) and not deferred.called: prev_context = LoggingContext.set_current_context(LoggingContext.sentinel) diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 1101881a2d..18424f6c36 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -18,7 +18,7 @@ from twisted.internet import defer from synapse.api.errors import LimitExceededError from synapse.util.async import sleep -from synapse.util.logcontext import preserve_fn +from synapse.util.logcontext import run_in_background import collections import contextlib @@ -150,7 +150,7 @@ class _PerHostRatelimiter(object): "Ratelimit [%s]: sleeping req", id(request_id), ) - ret_defer = preserve_fn(sleep)(self.sleep_msec / 1000.0) + ret_defer = run_in_background(sleep, self.sleep_msec / 1000.0) self.sleeping_requests.add(request_id) diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 47b0bb5eb3..4e93f69d3a 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -203,8 +203,8 @@ class RetryDestinationLimiter(object): ) except Exception: logger.exception( - "Failed to store set_destination_retry_timings", + "Failed to store destination_retry_timings", ) # we deliberately do this in the background. - synapse.util.logcontext.preserve_fn(store_retry_timings)() + synapse.util.logcontext.run_in_background(store_retry_timings) -- cgit 1.5.1 From 4f2f5171b752169e265b5d93c9b4e788171b5326 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Sat, 28 Apr 2018 11:39:37 +0200 Subject: replace stringIO imports --- synapse/http/client.py | 4 ++-- synapse/util/logformatter.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'synapse/util') diff --git a/synapse/http/client.py b/synapse/http/client.py index 62309c3365..70a19d9b74 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -40,7 +40,7 @@ from twisted.web.http import PotentialDataLoss from twisted.web.http_headers import Headers from twisted.web._newclient import ResponseDone -from StringIO import StringIO +from six import StringIO import simplejson as json import logging @@ -507,7 +507,7 @@ class SpiderHttpClient(SimpleHttpClient): reactor, SpiderEndpointFactory(hs) ) - ), [('gzip', GzipDecoder)] + ), [(b'gzip', GzipDecoder)] ) # We could look like Chrome: # self.user_agent = ("Mozilla/5.0 (%s) (KHTML, like Gecko) diff --git a/synapse/util/logformatter.py b/synapse/util/logformatter.py index cdbc4bffd7..59ab3c6968 100644 --- a/synapse/util/logformatter.py +++ b/synapse/util/logformatter.py @@ -14,7 +14,7 @@ # limitations under the License. 
-import StringIO +from six import StringIO import logging import traceback -- cgit 1.5.1 From d82b6ea9e68150aece1fc46cb0821b31cf728910 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Sat, 28 Apr 2018 13:57:00 +0200 Subject: Move more xrange to six plus a bonus next() Signed-off-by: Adrian Tschira --- synapse/federation/federation_client.py | 4 +++- synapse/handlers/room_list.py | 4 +++- synapse/storage/registration.py | 4 +++- synapse/storage/schema/delta/30/as_users.py | 4 +++- synapse/storage/stream.py | 4 +++- synapse/storage/tags.py | 4 +++- synapse/util/async.py | 6 ++++-- synapse/util/stringutils.py | 5 +++-- synapse/util/wheel_timer.py | 4 +++- 9 files changed, 28 insertions(+), 11 deletions(-) (limited to 'synapse/util') diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 8e2c0c4cd2..a2067fc390 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -19,6 +19,8 @@ import itertools import logging import random +from six.moves import range + from twisted.internet import defer from synapse.api.constants import Membership @@ -413,7 +415,7 @@ class FederationClient(FederationBase): batch_size = 20 missing_events = list(missing_events) - for i in xrange(0, len(missing_events), batch_size): + for i in range(0, len(missing_events), batch_size): batch = set(missing_events[i:i + batch_size]) deferreds = [ diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index add3f9b009..5757bb7f8a 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -15,6 +15,8 @@ from twisted.internet import defer +from six.moves import range + from ._base import BaseHandler from synapse.api.constants import ( @@ -200,7 +202,7 @@ class RoomListHandler(BaseHandler): step = len(rooms_to_scan) if len(rooms_to_scan) != 0 else 1 chunk = [] - for i in xrange(0, len(rooms_to_scan), step): + for i in range(0, len(rooms_to_scan), step): batch = rooms_to_scan[i:i + step] logger.info("Processing %i rooms for result", len(batch)) yield concurrently_execute( diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 6b557ca0cf..a50717db2d 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -22,6 +22,8 @@ from synapse.storage import background_updates from synapse.storage._base import SQLBaseStore from synapse.util.caches.descriptors import cached, cachedInlineCallbacks +from six.moves import range + class RegistrationWorkerStore(SQLBaseStore): @cached() @@ -469,7 +471,7 @@ class RegistrationStore(RegistrationWorkerStore, match = regex.search(user_id) if match: found.add(int(match.group(1))) - for i in xrange(len(found) + 1): + for i in range(len(found) + 1): if i not in found: return i diff --git a/synapse/storage/schema/delta/30/as_users.py b/synapse/storage/schema/delta/30/as_users.py index c53e53c94f..85bd1a2006 100644 --- a/synapse/storage/schema/delta/30/as_users.py +++ b/synapse/storage/schema/delta/30/as_users.py @@ -14,6 +14,8 @@ import logging from synapse.config.appservice import load_appservices +from six.moves import range + logger = logging.getLogger(__name__) @@ -58,7 +60,7 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs): for as_id, user_ids in owned.items(): n = 100 - user_chunks = (user_ids[i:i + 100] for i in xrange(0, len(user_ids), n)) + user_chunks = (user_ids[i:i + 100] for i in range(0, len(user_ids), n)) for chunk in user_chunks: cur.execute( database_engine.convert_param_style( diff 
--git a/synapse/storage/stream.py b/synapse/storage/stream.py index 3b8b539993..52c90d8e04 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -47,6 +47,8 @@ from synapse.storage.engines import PostgresEngine, Sqlite3Engine import abc import logging +from six.moves import range + logger = logging.getLogger(__name__) @@ -196,7 +198,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): results = {} room_ids = list(room_ids) - for rm_ids in (room_ids[i:i + 20] for i in xrange(0, len(room_ids), 20)): + for rm_ids in (room_ids[i:i + 20] for i in range(0, len(room_ids), 20)): res = yield make_deferred_yieldable(defer.gatherResults([ preserve_fn(self.get_room_events_stream_for_room)( room_id, from_key, to_key, limit, order=order, diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index 13bff9f055..6671d3cfca 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ -22,6 +22,8 @@ from twisted.internet import defer import simplejson as json import logging +from six.moves import range + logger = logging.getLogger(__name__) @@ -98,7 +100,7 @@ class TagsWorkerStore(AccountDataWorkerStore): batch_size = 50 results = [] - for i in xrange(0, len(tag_ids), batch_size): + for i in range(0, len(tag_ids), batch_size): tags = yield self.runInteraction( "get_all_updated_tag_content", get_tag_content, diff --git a/synapse/util/async.py b/synapse/util/async.py index 1df5c5600c..cb53c31123 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -27,6 +27,8 @@ from contextlib import contextmanager import logging +from six.moves import range + logger = logging.getLogger(__name__) @@ -158,13 +160,13 @@ def concurrently_execute(func, args, limit): def _concurrently_execute_inner(): try: while True: - yield func(it.next()) + yield func(next(it)) except StopIteration: pass return logcontext.make_deferred_yieldable(defer.gatherResults([ preserve_fn(_concurrently_execute_inner)() - for _ in xrange(limit) + for _ in range(limit) ], consumeErrors=True)).addErrback(unwrapFirstError) diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py index 95a6168e16..b98b9dc6e4 100644 --- a/synapse/util/stringutils.py +++ b/synapse/util/stringutils.py @@ -15,6 +15,7 @@ import random import string +from six.moves import range _string_with_symbols = ( string.digits + string.ascii_letters + ".,;:^&*-_+=#~@" @@ -22,12 +23,12 @@ _string_with_symbols = ( def random_string(length): - return ''.join(random.choice(string.ascii_letters) for _ in xrange(length)) + return ''.join(random.choice(string.ascii_letters) for _ in range(length)) def random_string_with_symbols(length): return ''.join( - random.choice(_string_with_symbols) for _ in xrange(length) + random.choice(_string_with_symbols) for _ in range(length) ) diff --git a/synapse/util/wheel_timer.py b/synapse/util/wheel_timer.py index b70f9a6b0a..7a9e45aca9 100644 --- a/synapse/util/wheel_timer.py +++ b/synapse/util/wheel_timer.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from six.moves import range + class _Entry(object): __slots__ = ["end_key", "queue"] @@ -68,7 +70,7 @@ class WheelTimer(object): # Add empty entries between the end of the current list and when we want # to insert. This ensures there are no gaps. 
self.entries.extend( - _Entry(key) for key in xrange(last_key, then_key + 1) + _Entry(key) for key in range(last_key, then_key + 1) ) self.entries[-1].queue.append(obj) -- cgit 1.5.1 From e9143b659352e87fd9e26c8e5a771c78011bc945 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Sat, 28 Apr 2018 23:56:59 +0200 Subject: more bytes strings Signed-off-by: Adrian Tschira --- synapse/http/endpoint.py | 2 +- synapse/http/server.py | 2 +- synapse/rest/media/v1/upload_resource.py | 6 +++--- synapse/util/httpresourcetree.py | 7 +++++-- 4 files changed, 10 insertions(+), 7 deletions(-) (limited to 'synapse/util') diff --git a/synapse/http/endpoint.py b/synapse/http/endpoint.py index 00572c2897..db455e5909 100644 --- a/synapse/http/endpoint.py +++ b/synapse/http/endpoint.py @@ -286,7 +286,7 @@ def resolve_service(service_name, dns_client=client, cache=SERVER_CACHE, clock=t if (len(answers) == 1 and answers[0].type == dns.SRV and answers[0].payload - and answers[0].payload.target == dns.Name('.')): + and answers[0].payload.target == dns.Name(b'.')): raise ConnectError("Service %s unavailable" % service_name) for answer in answers: diff --git a/synapse/http/server.py b/synapse/http/server.py index 8d632290de..55b9ad5251 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -546,6 +546,6 @@ def _request_user_agent_is_curl(request): b"User-Agent", default=[] ) for user_agent in user_agents: - if "curl" in user_agent: + if b"curl" in user_agent: return True return False diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py index f6f498cdc5..a31e75cb46 100644 --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/v1/upload_resource.py @@ -81,15 +81,15 @@ class UploadResource(Resource): headers = request.requestHeaders if headers.hasHeader("Content-Type"): - media_type = headers.getRawHeaders("Content-Type")[0] + media_type = headers.getRawHeaders(b"Content-Type")[0] else: raise SynapseError( msg="Upload request missing 'Content-Type'", code=400, ) - # if headers.hasHeader("Content-Disposition"): - # disposition = headers.getRawHeaders("Content-Disposition")[0] + # if headers.hasHeader(b"Content-Disposition"): + # disposition = headers.getRawHeaders(b"Content-Disposition")[0] # TODO(markjh): parse content-dispostion content_uri = yield self.media_repo.create_content( diff --git a/synapse/util/httpresourcetree.py b/synapse/util/httpresourcetree.py index d747849553..e9f0f292ee 100644 --- a/synapse/util/httpresourcetree.py +++ b/synapse/util/httpresourcetree.py @@ -40,9 +40,12 @@ def create_resource_tree(desired_tree, root_resource): # extra resources to existing nodes. See self._resource_id for the key. 
resource_mappings = {} for full_path, res in desired_tree.items(): + # twisted requires all resources to be bytes + full_path = full_path.encode("utf-8") + logger.info("Attaching %s to path %s", res, full_path) last_resource = root_resource - for path_seg in full_path.split('/')[1:-1]: + for path_seg in full_path.split(b'/')[1:-1]: if path_seg not in last_resource.listNames(): # resource doesn't exist, so make a "dummy resource" child_resource = NoResource() @@ -57,7 +60,7 @@ def create_resource_tree(desired_tree, root_resource): # =========================== # now attach the actual desired resource - last_path_seg = full_path.split('/')[-1] + last_path_seg = full_path.split(b'/')[-1] # if there is already a resource here, thieve its children and # replace it -- cgit 1.5.1 From e482f8cd8504b36dc1ce2c1e51e0dee479d33249 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 2 May 2018 09:12:26 +0100 Subject: Fix incorrect reference to StringIO This was introduced in 4f2f5171 --- synapse/util/logformatter.py | 2 +- tests/util/test_logformatter.py | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 tests/util/test_logformatter.py (limited to 'synapse/util') diff --git a/synapse/util/logformatter.py b/synapse/util/logformatter.py index 59ab3c6968..3e42868ea9 100644 --- a/synapse/util/logformatter.py +++ b/synapse/util/logformatter.py @@ -32,7 +32,7 @@ class LogFormatter(logging.Formatter): super(LogFormatter, self).__init__(*args, **kwargs) def formatException(self, ei): - sio = StringIO.StringIO() + sio = StringIO() (typ, val, tb) = ei # log the stack above the exception capture point if possible, but diff --git a/tests/util/test_logformatter.py b/tests/util/test_logformatter.py new file mode 100644 index 0000000000..1a1a8412f2 --- /dev/null +++ b/tests/util/test_logformatter.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys + +from synapse.util.logformatter import LogFormatter +from tests import unittest + + +class TestException(Exception): + pass + + +class LogFormatterTestCase(unittest.TestCase): + def test_formatter(self): + formatter = LogFormatter() + + try: + raise TestException("testytest") + except TestException: + ei = sys.exc_info() + + output = formatter.formatException(ei) + + # check the output looks vaguely sane + self.assertIn("testytest", output) + self.assertIn("Capture point", output) -- cgit 1.5.1 From f22e7cda2c72d461acba664cb083e8c4e3c7572a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 2 May 2018 11:46:23 +0100 Subject: Fix a class of logcontext leaks So, it turns out that if you have a first `Deferred` `D1`, you can add a callback which returns another `Deferred` `D2`, and `D2` must then complete before any further callbacks on `D1` will execute (and later callbacks on `D1` get the *result* of `D2` rather than `D2` itself). 
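For illustration, here is a minimal sketch of that situation using nothing but
stock `twisted.internet.defer` (the names `d1`, `d2` and `cb` are just
placeholders, not anything in synapse):

    from twisted.internet import defer

    d1 = defer.succeed(None)      # d1 has already fired: d1.called is True

    def cb(_):
        d2 = defer.Deferred()     # a second deferred which has not yet fired
        return d2                 # returning it from a callback chains d2 onto d1

    d1.addCallback(cb)            # cb runs immediately, and d1 pauses on d2

    assert d1.called              # still true...
    assert d1.paused              # ...but the callback chain is now waiting on d2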
So, `D1` might have `called=True` (as in, it has started running its callbacks), but any new callbacks added to `D1` won't get run until `D2` completes - so if you `yield D1` in an `inlineCallbacks` function, your `yield` will 'block'. In conclusion: some of our assumptions in `logcontext` were invalid. We need to make sure that we don't optimise out the logcontext juggling when this situation happens. Fortunately, it is easy to detect by checking `D1.paused`. --- synapse/util/logcontext.py | 60 ++++++++++++++++++++++++-------------- tests/util/test_logcontext.py | 67 ++++++++++++++++++++++++++++++++++++------- 2 files changed, 94 insertions(+), 33 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 01ac71e53e..e086e12213 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -302,7 +302,7 @@ def preserve_fn(f): def run_in_background(f, *args, **kwargs): """Calls a function, ensuring that the current context is restored after return from the function, and that the sentinel context is set once the - deferred returned by the funtion completes. + deferred returned by the function completes. Useful for wrapping functions that return a deferred which you don't yield on (for instance because you want to pass it to deferred.gatherResults()). @@ -320,24 +320,31 @@ def run_in_background(f, *args, **kwargs): # by synchronous exceptions, so let's turn them into Failures. return defer.fail() - if isinstance(res, defer.Deferred) and not res.called: - # The function will have reset the context before returning, so - # we need to restore it now. - LoggingContext.set_current_context(current) - - # The original context will be restored when the deferred - # completes, but there is nothing waiting for it, so it will - # get leaked into the reactor or some other function which - # wasn't expecting it. We therefore need to reset the context - # here. - # - # (If this feels asymmetric, consider it this way: we are - # effectively forking a new thread of execution. We are - # probably currently within a ``with LoggingContext()`` block, - # which is supposed to have a single entry and exit point. But - # by spawning off another deferred, we are effectively - # adding a new exit point.) - res.addBoth(_set_context_cb, LoggingContext.sentinel) + if not isinstance(res, defer.Deferred): + return res + + if res.called and not res.paused: + # The function should have maintained the logcontext, so we can + # optimise out the messing about + return res + + # The function may have reset the context before returning, so + # we need to restore it now. + ctx = LoggingContext.set_current_context(current) + + # The original context will be restored when the deferred + # completes, but there is nothing waiting for it, so it will + # get leaked into the reactor or some other function which + # wasn't expecting it. We therefore need to reset the context + # here. + # + # (If this feels asymmetric, consider it this way: we are + # effectively forking a new thread of execution. We are + # probably currently within a ``with LoggingContext()`` block, + # which is supposed to have a single entry and exit point. But + # by spawning off another deferred, we are effectively + # adding a new exit point.) + res.addBoth(_set_context_cb, ctx) return res @@ -354,9 +361,18 @@ def make_deferred_yieldable(deferred): (This is more-or-less the opposite operation to run_in_background.) 
""" - if isinstance(deferred, defer.Deferred) and not deferred.called: - prev_context = LoggingContext.set_current_context(LoggingContext.sentinel) - deferred.addBoth(_set_context_cb, prev_context) + if not isinstance(deferred, defer.Deferred): + return deferred + + if deferred.called and not deferred.paused: + # it looks like this deferred is ready to run any callbacks we give it + # immediately. We may as well optimise out the logcontext faffery. + return deferred + + # ok, we can't be sure that a yield won't block, so let's reset the + # logcontext, and add a callback to the deferred to restore it. + prev_context = LoggingContext.set_current_context(LoggingContext.sentinel) + deferred.addBoth(_set_context_cb, prev_context) return deferred diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py index 4850722bc5..7707fbb1f0 100644 --- a/tests/util/test_logcontext.py +++ b/tests/util/test_logcontext.py @@ -36,24 +36,28 @@ class LoggingContextTestCase(unittest.TestCase): yield sleep(0) self._check_test_key("one") - def _test_preserve_fn(self, function): + def _test_run_in_background(self, function): sentinel_context = LoggingContext.current_context() callback_completed = [False] - @defer.inlineCallbacks - def cb(): + def test(): context_one.request = "one" - yield function() - self._check_test_key("one") + d = function() - callback_completed[0] = True + def cb(res): + self._check_test_key("one") + callback_completed[0] = True + return res + d.addCallback(cb) + + return d with LoggingContext() as context_one: context_one.request = "one" # fire off function, but don't wait on it. - logcontext.preserve_fn(cb)() + logcontext.run_in_background(test) self._check_test_key("one") @@ -80,20 +84,31 @@ class LoggingContextTestCase(unittest.TestCase): # test is done once d2 finishes return d2 - def test_preserve_fn_with_blocking_fn(self): + def test_run_in_background_with_blocking_fn(self): @defer.inlineCallbacks def blocking_function(): yield sleep(0) - return self._test_preserve_fn(blocking_function) + return self._test_run_in_background(blocking_function) - def test_preserve_fn_with_non_blocking_fn(self): + def test_run_in_background_with_non_blocking_fn(self): @defer.inlineCallbacks def nonblocking_function(): with logcontext.PreserveLoggingContext(): yield defer.succeed(None) - return self._test_preserve_fn(nonblocking_function) + return self._test_run_in_background(nonblocking_function) + + @unittest.DEBUG + def test_run_in_background_with_chained_deferred(self): + # a function which returns a deferred which looks like it has been + # called, but is actually paused + def testfunc(): + return logcontext.make_deferred_yieldable( + _chained_deferred_function() + ) + + return self._test_run_in_background(testfunc) @defer.inlineCallbacks def test_make_deferred_yieldable(self): @@ -118,6 +133,22 @@ class LoggingContextTestCase(unittest.TestCase): # now it should be restored self._check_test_key("one") + @defer.inlineCallbacks + def test_make_deferred_yieldable_with_chained_deferreds(self): + sentinel_context = LoggingContext.current_context() + + with LoggingContext() as context_one: + context_one.request = "one" + + d1 = logcontext.make_deferred_yieldable(_chained_deferred_function()) + # make sure that the context was reset by make_deferred_yieldable + self.assertIs(LoggingContext.current_context(), sentinel_context) + + yield d1 + + # now it should be restored + self._check_test_key("one") + @defer.inlineCallbacks def test_make_deferred_yieldable_on_non_deferred(self): """Check 
that make_deferred_yieldable does the right thing when its @@ -132,3 +163,17 @@ class LoggingContextTestCase(unittest.TestCase): r = yield d1 self.assertEqual(r, "bum") self._check_test_key("one") + + +# a function which returns a deferred which has been "called", but +# which had a function which returned another incomplete deferred on +# its callback list, so won't yet call any other new callbacks. +def _chained_deferred_function(): + d = defer.succeed(None) + + def cb(res): + d2 = defer.Deferred() + reactor.callLater(0, d2.callback, res) + return d2 + d.addCallback(cb) + return d -- cgit 1.5.1 From a7fe62f0cb7bdb40e5c9b7f09f694e9c0913610c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 3 May 2018 11:16:36 +0100 Subject: Fix logcontext leaks in rate limiter --- synapse/util/ratelimitutils.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 18424f6c36..0ab63c3d7d 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -18,7 +18,10 @@ from twisted.internet import defer from synapse.api.errors import LimitExceededError from synapse.util.async import sleep -from synapse.util.logcontext import run_in_background +from synapse.util.logcontext import ( + run_in_background, make_deferred_yieldable, + PreserveLoggingContext, +) import collections import contextlib @@ -176,6 +179,9 @@ class _PerHostRatelimiter(object): return r def on_err(r): + # XXX: why is this necessary? this is called before we start + # processing the request so why would the request be in + # current_processing? self.current_processing.discard(request_id) return r @@ -187,7 +193,7 @@ class _PerHostRatelimiter(object): ret_defer.addCallbacks(on_start, on_err) ret_defer.addBoth(on_both) - return ret_defer + return make_deferred_yieldable(ret_defer) def _on_exit(self, request_id): logger.debug( @@ -197,7 +203,12 @@ class _PerHostRatelimiter(object): self.current_processing.discard(request_id) try: request_id, deferred = self.ready_request_queue.popitem() + + # XXX: why do we do the following? the on_start callback above will + # do it for us. 
self.current_processing.add(request_id) - deferred.callback(None) + + with PreserveLoggingContext(): + deferred.callback(None) except KeyError: pass -- cgit 1.5.1 From 73cbdef5f773ddd091713d71ac9c63639d7c0825 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Tue, 15 May 2018 17:55:46 +0200 Subject: fix py3 intern and remove unnecessary py3 encode Signed-off-by: Adrian Tschira --- synapse/util/caches/__init__.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'synapse/util') diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 4adae96681..329ccbb866 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -16,6 +16,9 @@ import synapse.metrics import os +from six.moves import intern +import six + CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.5)) metrics = synapse.metrics.get_metrics_for("synapse.util.caches") @@ -66,7 +69,9 @@ def intern_string(string): return None try: - string = string.encode("ascii") + if six.PY2: + string = string.encode("ascii") + return intern(string) except UnicodeEncodeError: return string -- cgit 1.5.1 From 45b55e23d34371986ee0da5be784a1f2134fd58a Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Sun, 29 Apr 2018 13:54:38 +0200 Subject: Add batch_iter to utils There's a frequent idiom I noticed where an iterable is split up into a number of chunks/batches. Unfortunately that method does not work with iterators like dict.keys() in python3. This implementation works with iterators. Signed-off-by: Adrian Tschira --- synapse/util/__init__.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'synapse/util') diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 814a7bf71b..fc11e26623 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -20,6 +20,8 @@ from twisted.internet import defer, reactor, task import time import logging +from itertools import islice + logger = logging.getLogger(__name__) @@ -79,3 +81,19 @@ class Clock(object): except Exception: if not ignore_errs: raise + + +def batch_iter(iterable, size): + """batch an iterable up into tuples with a maximum size + + Args: + iterable (iterable): the iterable to slice + size (int): the maximum batch size + + Returns: + an iterator over the chunks + """ + # make sure we can deal with iterables like lists too + sourceiter = iter(iterable) + # call islice until it returns an empty tuple + return iter(lambda: tuple(islice(sourceiter, size)), ()) -- cgit 1.5.1 From df9f72d9e5fe264b86005208e0f096156eb03e4b Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 21 May 2018 19:47:37 -0500 Subject: replacing portions --- synapse/api/auth.py | 2 +- synapse/federation/federation_client.py | 17 ++- synapse/federation/federation_server.py | 16 ++- synapse/federation/send_queue.py | 8 +- synapse/federation/transaction_queue.py | 47 +++----- synapse/handlers/appservice.py | 19 +-- synapse/handlers/presence.py | 58 ++++----- synapse/http/client.py | 20 +--- synapse/http/matrixfederationclient.py | 14 +-- synapse/metrics/__init__.py | 182 ++++++++++------------------- synapse/notifier.py | 23 ++-- synapse/push/httppusher.py | 13 +-- synapse/push/push_rule_evaluator.py | 2 +- synapse/python_dependencies.py | 1 + synapse/replication/tcp/protocol.py | 88 ++++++-------- synapse/util/caches/__init__.py | 57 ++++++--- synapse/util/caches/descriptors.py | 2 +- synapse/util/caches/dictionary_cache.py | 2 +- synapse/util/caches/expiringcache.py | 4 +- 
synapse/util/caches/response_cache.py | 11 +- synapse/util/caches/stream_change_cache.py | 2 +- synapse/util/metrics.py | 91 ++++----------- tests/__init__.py | 3 + 23 files changed, 268 insertions(+), 414 deletions(-) (limited to 'synapse/util') diff --git a/synapse/api/auth.py b/synapse/api/auth.py index f17fda6315..b052cf532b 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -57,7 +57,7 @@ class Auth(object): self.TOKEN_NOT_FOUND_HTTP_STATUS = 401 self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000) - register_cache("token_cache", self.token_cache) + register_cache("cache", "token_cache", self.token_cache) @defer.inlineCallbacks def check_from_context(self, event, context, do_sig_check=True): diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 6163f7c466..2761ffae07 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -32,20 +32,17 @@ from synapse.federation.federation_base import ( FederationBase, event_from_pdu_json, ) -import synapse.metrics from synapse.util import logcontext, unwrapFirstError from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.util.logutils import log_function from synapse.util.retryutils import NotRetryingDestination -logger = logging.getLogger(__name__) - +from prometheus_client import Counter -# synapse.federation.federation_client is a silly name -metrics = synapse.metrics.get_metrics_for("synapse.federation.client") +logger = logging.getLogger(__name__) -sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"]) +sent_queries_counter = Counter("synapse_federation_client_sent_queries", "", ["type"]) PDU_RETRY_TIME_MS = 1 * 60 * 1000 @@ -108,7 +105,7 @@ class FederationClient(FederationBase): a Deferred which will eventually yield a JSON object from the response """ - sent_queries_counter.inc(query_type) + sent_queries_counter.labels(query_type).inc() return self.transport_layer.make_query( destination, query_type, args, retry_on_dns_fail=retry_on_dns_fail, @@ -127,7 +124,7 @@ class FederationClient(FederationBase): a Deferred which will eventually yield a JSON object from the response """ - sent_queries_counter.inc("client_device_keys") + sent_queries_counter.labels("client_device_keys").inc() return self.transport_layer.query_client_keys( destination, content, timeout ) @@ -137,7 +134,7 @@ class FederationClient(FederationBase): """Query the device keys for a list of user ids hosted on a remote server. 
""" - sent_queries_counter.inc("user_devices") + sent_queries_counter.labels("user_devices").inc() return self.transport_layer.query_user_devices( destination, user_id, timeout ) @@ -154,7 +151,7 @@ class FederationClient(FederationBase): a Deferred which will eventually yield a JSON object from the response """ - sent_queries_counter.inc("client_one_time_keys") + sent_queries_counter.labels("client_one_time_keys").inc() return self.transport_layer.claim_client_keys( destination, content, timeout ) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 247ddc89d5..8211273006 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -27,12 +27,13 @@ from synapse.federation.federation_base import ( from synapse.federation.persistence import TransactionActions from synapse.federation.units import Edu, Transaction -import synapse.metrics from synapse.types import get_domain_from_id from synapse.util import async from synapse.util.caches.response_cache import ResponseCache from synapse.util.logutils import log_function +from prometheus_client import Counter + from six import iteritems # when processing incoming transactions, we try to handle multiple rooms in @@ -41,14 +42,11 @@ TRANSACTION_CONCURRENCY_LIMIT = 10 logger = logging.getLogger(__name__) -# synapse.federation.federation_server is a silly name -metrics = synapse.metrics.get_metrics_for("synapse.federation.server") - -received_pdus_counter = metrics.register_counter("received_pdus") +received_pdus_counter = Counter("synapse_federation_server_received_pdus", "") -received_edus_counter = metrics.register_counter("received_edus") +received_edus_counter = Counter("synapse_federation_server_received_edus", "") -received_queries_counter = metrics.register_counter("received_queries", labels=["type"]) +received_queries_counter = Counter("synapse_federation_server_received_queries", "", ["type"]) class FederationServer(FederationBase): @@ -131,7 +129,7 @@ class FederationServer(FederationBase): logger.debug("[%s] Transaction is new", transaction.transaction_id) - received_pdus_counter.inc_by(len(transaction.pdus)) + received_pdus_counter.inc(len(transaction.pdus)) pdus_by_room = {} @@ -292,7 +290,7 @@ class FederationServer(FederationBase): @defer.inlineCallbacks def on_query_request(self, query_type, args): - received_queries_counter.inc(query_type) + received_queries_counter.labels(query_type).inc() resp = yield self.registry.on_query(query_type, args) defer.returnValue((200, resp)) diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 0f0c687b37..e6e1888f3a 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -33,7 +33,7 @@ from .units import Edu from synapse.storage.presence import UserPresenceState from synapse.util.metrics import Measure -import synapse.metrics +from synapse.metrics import LaterGauge from blist import sorteddict from collections import namedtuple @@ -45,9 +45,6 @@ from six import itervalues, iteritems logger = logging.getLogger(__name__) -metrics = synapse.metrics.get_metrics_for(__name__) - - class FederationRemoteSendQueue(object): """A drop in replacement for TransactionQueue""" @@ -77,8 +74,7 @@ class FederationRemoteSendQueue(object): # lambda binds to the queue rather than to the name of the queue which # changes. ARGH. 
def register(name, queue): - metrics.register_callback( - queue_name + "_size", + LaterGauge("synapse_federation_send_queue_%s_size" % (queue_name,), "", lambda: len(queue), ) diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index ded2b1871a..778924a13c 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -26,23 +26,18 @@ from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter from synapse.util.metrics import measure_func from synapse.handlers.presence import format_user_presence_state, get_interested_remotes import synapse.metrics +from synapse.metrics import LaterGauge +from synapse.metrics import ( + sent_edus_counter, sent_transactions_counter, events_processed_counter) + +from prometheus_client import Counter import logging logger = logging.getLogger(__name__) -metrics = synapse.metrics.get_metrics_for(__name__) - -client_metrics = synapse.metrics.get_metrics_for("synapse.federation.client") -sent_pdus_destination_dist = client_metrics.register_distribution( - "sent_pdu_destinations" -) -sent_edus_counter = client_metrics.register_counter("sent_edus") - -sent_transactions_counter = client_metrics.register_counter("sent_transactions") - -events_processed_counter = client_metrics.register_counter("events_processed") +sent_pdus_destination_dist = Counter("synapse_federation_client_sent_pdu_destinations", "") class TransactionQueue(object): @@ -69,8 +64,7 @@ class TransactionQueue(object): # done self.pending_transactions = {} - metrics.register_callback( - "pending_destinations", + LaterGauge("pending_destinations", "", [], lambda: len(self.pending_transactions), ) @@ -94,12 +88,12 @@ class TransactionQueue(object): # Map of destination -> (edu_type, key) -> Edu self.pending_edus_keyed_by_dest = edus_keyed = {} - metrics.register_callback( - "pending_pdus", + LaterGauge( + "pending_pdus", "", [], lambda: sum(map(len, pdus.values())), ) - metrics.register_callback( - "pending_edus", + LaterGauge( + "pending_edus", "", [], lambda: ( sum(map(len, edus.values())) + sum(map(len, presence.values())) @@ -241,18 +235,15 @@ class TransactionQueue(object): now = self.clock.time_msec() ts = yield self.store.get_received_ts(events[-1].event_id) - synapse.metrics.event_processing_lag.set( - now - ts, "federation_sender", - ) - synapse.metrics.event_processing_last_ts.set( - ts, "federation_sender", - ) + synapse.metrics.event_processing_lag.labels( + "federation_sender").set(now - ts) + synapse.metrics.event_processing_last_ts.labels( + "federation_sender").set(ts) - events_processed_counter.inc_by(len(events)) + events_processed_counter.inc(len(events)) - synapse.metrics.event_processing_positions.set( - next_token, "federation_sender", - ) + synapse.metrics.event_processing_positions.labels( + "federation_sender").set(next_token) finally: self._is_processing = False @@ -275,7 +266,7 @@ class TransactionQueue(object): if not destinations: return - sent_pdus_destination_dist.inc_by(len(destinations)) + sent_pdus_destination_dist.inc(len(destinations)) for destination in destinations: self.pending_pdus_by_dest.setdefault(destination, []).append( diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index b596f098fd..a7345331af 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -21,14 +21,13 @@ from synapse.util.metrics import Measure from synapse.util.logcontext import ( make_deferred_yieldable, run_in_background, ) +from 
prometheus_client import Counter import logging logger = logging.getLogger(__name__) -metrics = synapse.metrics.get_metrics_for(__name__) - -events_processed_counter = metrics.register_counter("events_processed") +events_processed_counter = Counter("synapse_handlers_appservice_events_processed", "") def log_failure(failure): @@ -128,18 +127,12 @@ class ApplicationServicesHandler(object): now = self.clock.time_msec() ts = yield self.store.get_received_ts(events[-1].event_id) - synapse.metrics.event_processing_positions.set( - upper_bound, "appservice_sender", - ) + synapse.metrics.event_processing_positions.labels("appservice_sender").set(upper_bound) - events_processed_counter.inc_by(len(events)) + events_processed_counter.inc(len(events)) - synapse.metrics.event_processing_lag.set( - now - ts, "appservice_sender", - ) - synapse.metrics.event_processing_last_ts.set( - ts, "appservice_sender", - ) + synapse.metrics.event_processing_lag.labels("appservice_sender").set(now - ts) + synapse.metrics.event_processing_last_ts.labels("appservice_sender").set(ts) finally: self.is_processing = False diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 585f3e4da2..06d937ef3a 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -36,27 +36,27 @@ from synapse.util.logutils import log_function from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer from synapse.types import UserID, get_domain_from_id -import synapse.metrics +from synapse.metrics import LaterGauge import logging +from prometheus_client import Counter + logger = logging.getLogger(__name__) -metrics = synapse.metrics.get_metrics_for(__name__) -notified_presence_counter = metrics.register_counter("notified_presence") -federation_presence_out_counter = metrics.register_counter("federation_presence_out") -presence_updates_counter = metrics.register_counter("presence_updates") -timers_fired_counter = metrics.register_counter("timers_fired") -federation_presence_counter = metrics.register_counter("federation_presence") -bump_active_time_counter = metrics.register_counter("bump_active_time") +notified_presence_counter = Counter("synapse_handler_presence_notified_presence", "") +federation_presence_out_counter = Counter("synapse_handler_presence_federation_presence_out", "") +presence_updates_counter = Counter("synapse_handler_presence_presence_updates", "") +timers_fired_counter = Counter("synapse_handler_presence_timers_fired", "") +federation_presence_counter = Counter("synapse_handler_presence_federation_presence", "") +bump_active_time_counter = Counter("synapse_handler_presence_bump_active_time", "") -get_updates_counter = metrics.register_counter("get_updates", labels=["type"]) +get_updates_counter = Counter("synapse_handler_presence_get_updates", "", ["type"]) -notify_reason_counter = metrics.register_counter("notify_reason", labels=["reason"]) -state_transition_counter = metrics.register_counter( - "state_transition", labels=["from", "to"] +notify_reason_counter = Counter("synapse_handler_presence_notify_reason", "", ["reason"]) +state_transition_counter = Counter("synapse_handler_presence_state_transition", "", ["from", "to"] ) @@ -137,9 +137,9 @@ class PresenceHandler(object): for state in active_presence } - metrics.register_callback( - "user_to_current_state_size", lambda: len(self.user_to_current_state) - ) + LaterGauge( + "user_to_current_state_size", "", [], lambda: len(self.user_to_current_state) + ).register() now = self.clock.time_msec() for 
state in active_presence: @@ -208,7 +208,7 @@ class PresenceHandler(object): 60 * 1000, ) - metrics.register_callback("wheel_timer_size", lambda: len(self.wheel_timer)) + LaterGauge("wheel_timer_size", "", [], lambda: len(self.wheel_timer)).register() @defer.inlineCallbacks def _on_shutdown(self): @@ -311,10 +311,10 @@ class PresenceHandler(object): # TODO: We should probably ensure there are no races hereafter - presence_updates_counter.inc_by(len(new_states)) + presence_updates_counter.inc(len(new_states)) if to_notify: - notified_presence_counter.inc_by(len(to_notify)) + notified_presence_counter.inc(len(to_notify)) yield self._persist_and_notify(to_notify.values()) self.unpersisted_users_changes |= set(s.user_id for s in new_states) @@ -325,7 +325,7 @@ class PresenceHandler(object): if user_id not in to_notify } if to_federation_ping: - federation_presence_out_counter.inc_by(len(to_federation_ping)) + federation_presence_out_counter.inc(len(to_federation_ping)) self._push_to_remotes(to_federation_ping.values()) @@ -363,7 +363,7 @@ class PresenceHandler(object): for user_id in users_to_check ] - timers_fired_counter.inc_by(len(states)) + timers_fired_counter.inc(len(states)) changes = handle_timeouts( states, @@ -707,7 +707,7 @@ class PresenceHandler(object): updates.append(prev_state.copy_and_replace(**new_fields)) if updates: - federation_presence_counter.inc_by(len(updates)) + federation_presence_counter.inc(len(updates)) yield self._update_states(updates) @defer.inlineCallbacks @@ -982,28 +982,28 @@ def should_notify(old_state, new_state): return False if old_state.status_msg != new_state.status_msg: - notify_reason_counter.inc("status_msg_change") + notify_reason_counter.labels("status_msg_change").inc() return True if old_state.state != new_state.state: - notify_reason_counter.inc("state_change") - state_transition_counter.inc(old_state.state, new_state.state) + notify_reason_counter.labels("state_change").inc() + state_transition_counter.labels(old_state.state, new_state.state).inc() return True if old_state.state == PresenceState.ONLINE: if new_state.currently_active != old_state.currently_active: - notify_reason_counter.inc("current_active_change") + notify_reason_counter.labels("current_active_change").inc() return True if new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY: # Only notify about last active bumps if we're not currently acive if not new_state.currently_active: - notify_reason_counter.inc("last_active_change_online") + notify_reason_counter.labels("last_active_change_online").inc() return True elif new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY: # Always notify for a transition where last active gets bumped. - notify_reason_counter.inc("last_active_change_not_online") + notify_reason_counter.labels("last_active_change_not_online").inc() return True return False @@ -1077,14 +1077,14 @@ class PresenceEventSource(object): if changed is not None and len(changed) < 500: # For small deltas, its quicker to get all changes and then # work out if we share a room or they're in our presence list - get_updates_counter.inc("stream") + get_updates_counter.labels("stream").inc() for other_user_id in changed: if other_user_id in users_interested_in: user_ids_changed.add(other_user_id) else: # Too many possible updates. Find all users we can see and check # if any of them have changed. 
- get_updates_counter.inc("full") + get_updates_counter.labels("full").inc() if from_key: user_ids_changed = stream_change_cache.get_entities_changed( diff --git a/synapse/http/client.py b/synapse/http/client.py index 70a19d9b74..61a1d2e2b3 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -23,7 +23,6 @@ from synapse.http import cancelled_to_request_timed_out_error from synapse.util.async import add_timeout_to_deferred from synapse.util.caches import CACHE_SIZE_FACTOR from synapse.util.logcontext import make_deferred_yieldable -import synapse.metrics from synapse.http.endpoint import SpiderEndpoint from canonicaljson import encode_canonical_json @@ -42,6 +41,7 @@ from twisted.web._newclient import ResponseDone from six import StringIO +from prometheus_client import Counter import simplejson as json import logging import urllib @@ -49,16 +49,8 @@ import urllib logger = logging.getLogger(__name__) -metrics = synapse.metrics.get_metrics_for(__name__) - -outgoing_requests_counter = metrics.register_counter( - "requests", - labels=["method"], -) -incoming_responses_counter = metrics.register_counter( - "responses", - labels=["method", "code"], -) +outgoing_requests_counter = Counter("synapse_http_client_requests", "", ["method"]) +incoming_responses_counter = Counter("synapse_http_client_responses", "", ["method", "code"]) class SimpleHttpClient(object): @@ -95,7 +87,7 @@ class SimpleHttpClient(object): def request(self, method, uri, *args, **kwargs): # A small wrapper around self.agent.request() so we can easily attach # counters to it - outgoing_requests_counter.inc(method) + outgoing_requests_counter.labels(method).inc() logger.info("Sending request %s %s", method, uri) @@ -109,14 +101,14 @@ class SimpleHttpClient(object): ) response = yield make_deferred_yieldable(request_deferred) - incoming_responses_counter.inc(method, response.code) + incoming_responses_counter.labels(method, response.code).inc() logger.info( "Received response to %s %s: %s", method, uri, response.code ) defer.returnValue(response) except Exception as e: - incoming_responses_counter.inc(method, "ERR") + incoming_responses_counter.labels(method, "ERR").inc() logger.info( "Error sending request to %s %s: %s %s", method, uri, type(e).__name__, e.message diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 4b2b85464d..259d3884e2 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -43,19 +43,13 @@ import sys import urllib from six.moves.urllib import parse as urlparse +from prometheus_client import Counter + logger = logging.getLogger(__name__) outbound_logger = logging.getLogger("synapse.http.outbound") -metrics = synapse.metrics.get_metrics_for(__name__) - -outgoing_requests_counter = metrics.register_counter( - "requests", - labels=["method"], -) -incoming_responses_counter = metrics.register_counter( - "responses", - labels=["method", "code"], -) +outgoing_requests_counter = Counter("synapse_http_matrixfederationclient_requests", "", ["method"]) +incoming_responses_counter = Counter("synapse_http_matrixfederationclient_responses", "", ["method", "code"]) MAX_LONG_RETRIES = 10 diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index e3b831db67..973ba6506f 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -18,14 +18,13 @@ import functools import time import gc import platform +import attr -from twisted.internet import reactor +from prometheus_client import Gauge, 
Histogram, Counter +from prometheus_client.core import ( + GaugeMetricFamily, CounterMetricFamily, REGISTRY) -from .metric import ( - CounterMetric, CallbackMetric, DistributionMetric, CacheMetric, - MemoryUsageMetric, GaugeMetric, -) -from .process_collector import register_process_collector +from twisted.internet import reactor logger = logging.getLogger(__name__) @@ -34,149 +33,94 @@ logger = logging.getLogger(__name__) running_on_pypy = platform.python_implementation() == 'PyPy' all_metrics = [] all_collectors = [] +all_gauges = {} +@attr.s(hash=True) +class LaterGauge(object): -class Metrics(object): - """ A single Metrics object gives a (mutable) slice view of the all_metrics - dict, allowing callers to easily register new metrics that are namespaced - nicely.""" - - def __init__(self, name): - self.name_prefix = name - - def make_subspace(self, name): - return Metrics("%s_%s" % (self.name_prefix, name)) - - def register_collector(self, func): - all_collectors.append(func) - - def _register(self, metric_class, name, *args, **kwargs): - full_name = "%s_%s" % (self.name_prefix, name) - - metric = metric_class(full_name, *args, **kwargs) - - all_metrics.append(metric) - return metric - - def register_counter(self, *args, **kwargs): - """ - Returns: - CounterMetric - """ - return self._register(CounterMetric, *args, **kwargs) - - def register_gauge(self, *args, **kwargs): - """ - Returns: - GaugeMetric - """ - return self._register(GaugeMetric, *args, **kwargs) + name = attr.ib() + desc = attr.ib() + labels = attr.ib(hash=False) + caller = attr.ib() - def register_callback(self, *args, **kwargs): - """ - Returns: - CallbackMetric - """ - return self._register(CallbackMetric, *args, **kwargs) + def collect(self): - def register_distribution(self, *args, **kwargs): - """ - Returns: - DistributionMetric - """ - return self._register(DistributionMetric, *args, **kwargs) - - def register_cache(self, *args, **kwargs): - """ - Returns: - CacheMetric - """ - return self._register(CacheMetric, *args, **kwargs) + g = GaugeMetricFamily(self.name, self.desc, self.labels) + try: + calls = self.caller() + except Exception: + logger.exception("Exception running callback for LaterGauge(%s)", self.name) + yield g + return -def register_memory_metrics(hs): - try: - import psutil - process = psutil.Process() - process.memory_info().rss - except (ImportError, AttributeError): - logger.warn( - "psutil is not installed or incorrect version." - " Disabling memory metrics." - ) - return - metric = MemoryUsageMetric(hs, psutil) - all_metrics.append(metric) + if isinstance(calls, dict): + for k, v in calls.items(): + g.add_metric(k, v) + else: + g.add_metric([], calls) + yield g -def get_metrics_for(pkg_name): - """ Returns a Metrics instance for conveniently creating metrics - namespaced with the given name prefix. """ + def register(self): + if self.name in all_gauges.keys(): + REGISTRY.unregister(all_gauges.pop(self.name)) - # Convert a "package.name" to "package_name" because Prometheus doesn't - # let us use .
in metric names - return Metrics(pkg_name.replace(".", "_")) + REGISTRY.register(self) + all_gauges[self.name] = self -def render_all(): - strs = [] +# +# Python GC metrics +# - for collector in all_collectors: - collector() +gc_unreachable = Gauge("python_gc_unreachable_total", "Unreachable GC objects", ["gen"]) +gc_time = Histogram("python_gc_time", "Time taken to GC (ms)", ["gen"], buckets=[1, 2, 5, 10, 25, 50, 100, 250, 500, 1000]) - for metric in all_metrics: - try: - strs += metric.render() - except Exception: - strs += ["# FAILED to render"] - logger.exception("Failed to render metric") +class GCCounts(object): + def collect(self): + gc_counts = gc.get_count() - strs.append("") # to generate a final CRLF + cm = GaugeMetricFamily("python_gc_counts", "GC cycle counts", labels=["gen"]) + for n, m in enumerate(gc.get_count()): + cm.add_metric([str(n)], m) - return "\n".join(strs) + yield cm +REGISTRY.register(GCCounts()) -register_process_collector(get_metrics_for("process")) +# +# Twisted reactor metrics +# +tick_time = Histogram("python_twisted_reactor_tick_time", "Tick time of the Twisted reactor (ms)", buckets=[1, 2, 5, 10, 50, 100, 250, 500, 1000, 2000]) +pending_calls_metric = Histogram("python_twisted_reactor_pending_calls", "Pending calls", buckets=[1, 2, 5, 10, 25, 50, 100, 250, 500, 1000]) -python_metrics = get_metrics_for("python") +# +# Federation Metrics +# -gc_time = python_metrics.register_distribution("gc_time", labels=["gen"]) -gc_unreachable = python_metrics.register_counter("gc_unreachable_total", labels=["gen"]) -python_metrics.register_callback( - "gc_counts", lambda: {(i,): v for i, v in enumerate(gc.get_count())}, labels=["gen"] -) +sent_edus_counter = Counter("synapse_federation_client_sent_edus", "") -reactor_metrics = get_metrics_for("python.twisted.reactor") -tick_time = reactor_metrics.register_distribution("tick_time") -pending_calls_metric = reactor_metrics.register_distribution("pending_calls") +sent_transactions_counter = Counter("synapse_federation_client_sent_transactions", "") -synapse_metrics = get_metrics_for("synapse") +events_processed_counter = Counter("synapse_federation_client_events_processed", "") # Used to track where various components have processed in the event stream, # e.g. federation sending, appservice sending, etc. -event_processing_positions = synapse_metrics.register_gauge( - "event_processing_positions", labels=["name"], -) +event_processing_positions = Gauge("synapse_event_processing_positions", "", ["name"]) # Used to track the current max events stream position -event_persisted_position = synapse_metrics.register_gauge( - "event_persisted_position", -) +event_persisted_position = Gauge("synapse_event_persisted_position", "") # Used to track the received_ts of the last event processed by various # components -event_processing_last_ts = synapse_metrics.register_gauge( - "event_processing_last_ts", labels=["name"], -) +event_processing_last_ts = Gauge("synapse_event_processing_last_ts", "", ["name"]) # Used to track the lag processing events. This is the time difference # between the last processed event's received_ts and the time it was # finished being processed. 
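LaterGauge above is an instance of prometheus_client's custom-collector protocol: any object with a collect() generator that yields metric families can be registered, and the registry invokes it at scrape time, so the reported value is computed lazily rather than pushed. A sketch of that pattern under illustrative names (not Synapse code):

    from prometheus_client.core import GaugeMetricFamily, REGISTRY

    class QueueSizeCollector(object):
        # Reports the current depth of a queue, evaluated at scrape time.

        def __init__(self, queue):
            self.queue = queue

        def collect(self):
            g = GaugeMetricFamily("example_queue_size", "Current queue depth")
            g.add_metric([], len(self.queue))  # no labels, one sample
            yield g

    work_queue = []
    REGISTRY.register(QueueSizeCollector(work_queue))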
-event_processing_lag = synapse_metrics.register_gauge( - "event_processing_lag", labels=["name"], -) - +event_processing_lag = Gauge("synapse_event_processing_lag", "", ["name"]) def runUntilCurrentTimer(func): @@ -206,8 +150,8 @@ def runUntilCurrentTimer(func): # since about 25% of time is actually spent running things triggered by # I/O events, but that is harder to capture without rewriting half the # reactor. - tick_time.inc_by(end - start) - pending_calls_metric.inc_by(num_pending) + tick_time.observe(end - start) + pending_calls_metric.observe(num_pending) if running_on_pypy: return ret @@ -224,8 +168,8 @@ def runUntilCurrentTimer(func): unreachable = gc.collect(i) end = time.time() * 1000 - gc_time.inc_by(end - start, i) - gc_unreachable.inc_by(unreachable, i) + gc_time.labels(i).observe(end - start) + gc_unreachable.labels(i).set(unreachable) return ret diff --git a/synapse/notifier.py b/synapse/notifier.py index 8355c7d621..123e6f1840 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -28,22 +28,19 @@ from synapse.util.logcontext import PreserveLoggingContext, run_in_background from synapse.util.metrics import Measure from synapse.types import StreamToken from synapse.visibility import filter_events_for_client -import synapse.metrics +from synapse.metrics import LaterGauge from collections import namedtuple +from prometheus_client import Counter import logging logger = logging.getLogger(__name__) -metrics = synapse.metrics.get_metrics_for(__name__) +notified_events_counter = Counter("synapse_notifier_notified_events", "") -notified_events_counter = metrics.register_counter("notified_events") - -users_woken_by_stream_counter = metrics.register_counter( - "users_woken_by_stream", labels=["stream"] -) +users_woken_by_stream_counter = Counter("synapse_notifier_users_woken_by_stream", "", ["stream"]) # TODO(paul): Should be shared somewhere @@ -108,7 +105,7 @@ class _NotifierUserStream(object): self.last_notified_ms = time_now_ms noify_deferred = self.notify_deferred - users_woken_by_stream_counter.inc(stream_key) + users_woken_by_stream_counter.labels(stream_key).inc() with PreserveLoggingContext(): self.notify_deferred = ObservableDeferred(defer.Deferred()) @@ -197,14 +194,14 @@ class Notifier(object): all_user_streams.add(x) return sum(stream.count_listeners() for stream in all_user_streams) - metrics.register_callback("listeners", count_listeners) + LaterGauge("listeners", "", [], count_listeners) - metrics.register_callback( - "rooms", + LaterGauge( + "rooms", "", [], lambda: count(bool, self.room_to_user_streams.values()), ) - metrics.register_callback( - "users", + LaterGauge( + "users", "", [], lambda: len(self.user_to_user_stream), ) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index b077e1a446..e22088ad6f 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -20,22 +20,17 @@ from twisted.internet.error import AlreadyCalled, AlreadyCancelled from . import push_rule_evaluator from . 
import push_tools -import synapse from synapse.push import PusherConfigException from synapse.util.logcontext import LoggingContext from synapse.util.metrics import Measure -logger = logging.getLogger(__name__) +from prometheus_client import Counter -metrics = synapse.metrics.get_metrics_for(__name__) +logger = logging.getLogger(__name__) -http_push_processed_counter = metrics.register_counter( - "http_pushes_processed", -) +http_push_processed_counter = Counter("http_pushes_processed", "") -http_push_failed_counter = metrics.register_counter( - "http_pushes_failed", -) +http_push_failed_counter = Counter("http_pushes_failed", "") class HttpPusher(object): diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 3601f2d365..c3e6c5c258 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -150,7 +150,7 @@ class PushRuleEvaluatorForEvent(object): # Caches (glob, word_boundary) -> regex for push. See _glob_matches regex_cache = LruCache(50000 * CACHE_SIZE_FACTOR) -register_cache("regex_push_cache", regex_cache) +register_cache("cache", "regex_push_cache", regex_cache) def _glob_matches(glob, value, word_boundary=False): diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 216db4d164..478c497722 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -56,6 +56,7 @@ REQUIREMENTS = { "msgpack-python>=0.3.0": ["msgpack"], "phonenumbers>=8.2.0": ["phonenumbers"], "six": ["six"], + "prometheus_client": ["prometheus_client"], } CONDITIONAL_REQUIREMENTS = { "web_client": { diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index d7d38464b2..5848f57c5e 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -60,19 +60,19 @@ from .commands import ( ) from .streams import STREAMS_MAP +from synapse.metrics import LaterGauge from synapse.util.stringutils import random_string -from synapse.metrics.metric import CounterMetric + +from prometheus_client import Counter + +from collections import defaultdict import logging -import synapse.metrics import struct import fcntl - -metrics = synapse.metrics.get_metrics_for(__name__) - -connection_close_counter = metrics.register_counter( - "close_reason", labels=["reason_type"], +connection_close_counter = Counter( + "synapse_replication_tcp_protocol_close_reason", "", ["reason_type"], ) @@ -136,12 +136,8 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver): # The LoopingCall for sending pings. 
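The python_dependencies.py hunk above maps each pip requirement string to the module names it should make importable; that mapping is what check_requirements() walks when the homeserver verifies its environment at startup. A hedged sketch of such a check (the REQUIREMENTS content here is abbreviated and the helper name is illustrative):

    import importlib

    REQUIREMENTS = {
        "prometheus_client": ["prometheus_client"],
        "six": ["six"],
    }

    def find_missing(requirements):
        # Try to import every module each requirement is expected to provide.
        missing = []
        for requirement, modules in requirements.items():
            for module in modules:
                try:
                    importlib.import_module(module)
                except ImportError:
                    missing.append((requirement, module))
        return missing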
self._send_ping_loop = None - self.inbound_commands_counter = CounterMetric( - "inbound_commands", labels=["command"], - ) - self.outbound_commands_counter = CounterMetric( - "outbound_commands", labels=["command"], - ) + self.inbound_commands_counter = defaultdict(int) + self.outbound_commands_counter = defaultdict(int) def connectionMade(self): logger.info("[%s] Connection established", self.id()) @@ -201,7 +197,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver): self.last_received_command = self.clock.time_msec() - self.inbound_commands_counter.inc(cmd_name) + self.inbound_commands_counter[cmd_name] = self.inbound_commands_counter[cmd_name] + 1 cmd_cls = COMMAND_MAP[cmd_name] try: @@ -251,8 +247,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver): self._queue_command(cmd) return - self.outbound_commands_counter.inc(cmd.NAME) - + self.outbound_commands_counter[cmd.NAME] = self.outbound_commands_counter[cmd.NAME] + 1 string = "%s %s" % (cmd.NAME, cmd.to_line(),) if "\n" in string: raise Exception("Unexpected newline in command: %r", string) @@ -317,9 +312,9 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver): def connectionLost(self, reason): logger.info("[%s] Replication connection closed: %r", self.id(), reason) if isinstance(reason, Failure): - connection_close_counter.inc(reason.type.__name__) + connection_close_counter.labels(reason.type.__name__).inc() else: - connection_close_counter.inc(reason.__class__.__name__) + connection_close_counter.labels(reason.__class__.__name__).inc() try: # Remove us from list of connections to be monitored @@ -566,14 +561,12 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol): # The following simply registers metrics for the replication connections -metrics.register_callback( - "pending_commands", +pending_commands = LaterGauge( + "pending_commands", "", ["name", "conn_id"], lambda: { (p.name, p.conn_id): len(p.pending_commands) for p in connected_connections - }, - labels=["name", "conn_id"], -) + }) def transport_buffer_size(protocol): @@ -583,14 +576,12 @@ def transport_buffer_size(protocol): return 0 -metrics.register_callback( - "transport_send_buffer", +transport_send_buffer = LaterGauge( + "synapse_replication_tcp_transport_send_buffer", "", ["name", "conn_id"], lambda: { (p.name, p.conn_id): transport_buffer_size(p) for p in connected_connections - }, - labels=["name", "conn_id"], -) + }) def transport_kernel_read_buffer_size(protocol, read=True): @@ -608,48 +599,37 @@ def transport_kernel_read_buffer_size(protocol, read=True): return 0 -metrics.register_callback( - "transport_kernel_send_buffer", +tcp_transport_kernel_send_buffer = LaterGauge( + "synapse_replication_tcp_transport_kernel_send_buffer", "", ["name", "conn_id"], lambda: { (p.name, p.conn_id): transport_kernel_read_buffer_size(p, False) for p in connected_connections - }, - labels=["name", "conn_id"], -) + }) -metrics.register_callback( - "transport_kernel_read_buffer", +tcp_transport_kernel_read_buffer = LaterGauge( + "synapse_replication_tcp_transport_kernel_read_buffer", "", ["name", "conn_id"], lambda: { (p.name, p.conn_id): transport_kernel_read_buffer_size(p, True) for p in connected_connections - }, - labels=["name", "conn_id"], -) + }) -metrics.register_callback( - "inbound_commands", +tcp_inbound_commands = LaterGauge( + "synapse_replication_tcp_inbound_commands", "", ["command", "name", "conn_id"], lambda: { (k[0], p.name, p.conn_id): count for p in connected_connections - for k, count in 
p.inbound_commands_counter.counts.iteritems() - }, - labels=["command", "name", "conn_id"], -) + for k, count in p.inbound_commands_counter.items() + }) -metrics.register_callback( - "outbound_commands", +tcp_outbound_commands = LaterGauge( + "synapse_replication_tcp_outbound_commands", "", ["command", "name", "conn_id"], lambda: { (k[0], p.name, p.conn_id): count for p in connected_connections - for k, count in p.outbound_commands_counter.counts.iteritems() - }, - labels=["command", "name", "conn_id"], -) + for k, count in p.outbound_commands_counter.items() + }) # number of updates received for each RDATA stream -inbound_rdata_count = metrics.register_counter( - "inbound_rdata_count", - labels=["stream_name"], -) +inbound_rdata_count = Counter("synapse_replication_tcp_inbound_rdata_count", "", ["stream_name"]) diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 4adae96681..438dcddf55 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -13,29 +13,52 @@ # See the License for the specific language governing permissions and # limitations under the License. -import synapse.metrics +from prometheus_client.core import GaugeMetricFamily, REGISTRY + import os CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.5)) -metrics = synapse.metrics.get_metrics_for("synapse.util.caches") - caches_by_name = {} -# cache_counter = metrics.register_cache( -# "cache", -# lambda: {(name,): len(caches_by_name[name]) for name in caches_by_name.keys()}, -# labels=["name"], -# ) - - -def register_cache(name, cache): - caches_by_name[name] = cache - return metrics.register_cache( - "cache", - lambda: len(cache), - name, - ) +collectors_by_name = {} + +def register_cache(name, cache_name, cache): + + # Check if the metric is already registered. Unregister it, if so. 
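The replication hunks above keep per-connection command counts in plain defaultdicts and export them through LaterGauge callbacks that return a dict: each key is a tuple of label values, and collect() emits one sample per entry. A sketch of that callback shape, assuming the LaterGauge class introduced earlier is importable from synapse.metrics (metric and variable names illustrative):

    from collections import defaultdict

    from synapse.metrics import LaterGauge

    inbound_commands = defaultdict(int)  # (command, conn_id) -> count

    def on_command(cmd_name, conn_id):
        inbound_commands[(cmd_name, conn_id)] += 1

    LaterGauge(
        "example_inbound_commands", "", ["command", "conn_id"],
        # returning a dict makes LaterGauge emit one sample per key
        lambda: dict(inbound_commands),
    ).register()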
+ metric_name = "synapse_util_caches_%s:%s" % (name, cache_name,) + if metric_name in collectors_by_name.keys(): + REGISTRY.unregister(collectors_by_name[metric_name]) + + class CacheMetric(object): + + hits = 0 + misses = 0 + evicted_size = 0 + + def inc_hits(self): + self.hits += 1 + + def inc_misses(self): + self.misses += 1 + + def inc_evictions(self, size=1): + self.evicted_size += size + + def collect(self): + cache_size = len(cache) + + gm = GaugeMetricFamily(metric_name, "", labels=["size", "hits", "misses", "total"]) + gm.add_metric(["size"], cache_size) + gm.add_metric(["hits"], self.hits) + gm.add_metric(["misses"], self.misses) + gm.add_metric(["total"], self.hits + self.misses) + yield gm + metric = CacheMetric() + REGISTRY.register(metric) + caches_by_name[cache_name] = cache + collectors_by_name[metric_name] = metric + return metric KNOWN_KEYS = { key: key for key in diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 68285a7594..a4188eb099 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -80,7 +80,7 @@ class Cache(object): self.name = name self.keylen = keylen self.thread = None - self.metrics = register_cache(name, self.cache) + self.metrics = register_cache("descriptor", name, self.cache) def _on_evicted(self, evicted_count): self.metrics.inc_evictions(evicted_count) diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py index 1709e8b429..bdc21e348f 100644 --- a/synapse/util/caches/dictionary_cache.py +++ b/synapse/util/caches/dictionary_cache.py @@ -55,7 +55,7 @@ class DictionaryCache(object): __slots__ = [] self.sentinel = Sentinel() - self.metrics = register_cache(name, self.cache) + self.metrics = register_cache("dictionary", name, self.cache) def check_thread(self): expected_thread = self.thread diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 0aa103eecb..ff04c91955 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -52,12 +52,12 @@ class ExpiringCache(object): self._cache = OrderedDict() - self.metrics = register_cache(cache_name, self) - self.iterable = iterable self._size_estimate = 0 + self.metrics = register_cache("expiring", cache_name, self) + def start(self): if not self._expiry_ms: # Don't bother starting the loop if things never expire diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index 7f79333e96..a8491b42d5 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -17,7 +17,7 @@ import logging from twisted.internet import defer from synapse.util.async import ObservableDeferred -from synapse.util.caches import metrics as cache_metrics +from synapse.util.caches import register_cache from synapse.util.logcontext import make_deferred_yieldable, run_in_background logger = logging.getLogger(__name__) @@ -38,15 +38,16 @@ class ResponseCache(object): self.timeout_sec = timeout_ms / 1000. self._name = name - self._metrics = cache_metrics.register_cache( - "response_cache", - size_callback=lambda: self.size(), - cache_name=name, + self._metrics = register_cache( + "response_cache", name, self ) def size(self): return len(self.pending_result_cache) + def __len__(self): + return self.size() + def get(self, key): """Look up the given key. 
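Callers of the new register_cache() above keep the returned metric object and tick it as the cache is used; the collector itself reads the cache's size with len() at scrape time. A minimal sketch of that calling convention, assuming synapse.util.caches is importable (the cache here is simplified to a plain dict):

    from synapse.util.caches import register_cache

    my_cache = {}
    metrics = register_cache("cache", "example_cache", my_cache)

    def get(key):
        if key in my_cache:
            metrics.inc_hits()
            return my_cache[key]
        metrics.inc_misses()
        return None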
diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 941d873ab8..a7fe0397fa 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -38,7 +38,7 @@ class StreamChangeCache(object): self._cache = sorteddict() self._earliest_known_stream_pos = current_stream_pos self.name = name - self.metrics = register_cache(self.name, self._cache) + self.metrics = register_cache("cache", self.name, self._cache) for entity, stream_pos in prefilled_cache.items(): self.entity_has_changed(entity, stream_pos) diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index e4b5687a4b..a964286d85 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -15,8 +15,8 @@ from twisted.internet import defer +from prometheus_client import Counter from synapse.util.logcontext import LoggingContext -import synapse.metrics from functools import wraps import logging @@ -24,66 +24,21 @@ import logging logger = logging.getLogger(__name__) +block_counter = Counter("synapse_util_metrics_block_count", "", ["block_name"]) -metrics = synapse.metrics.get_metrics_for(__name__) - -# total number of times we have hit this block -block_counter = metrics.register_counter( - "block_count", - labels=["block_name"], - alternative_names=( - # the following are all deprecated aliases for the same metric - metrics.name_prefix + x for x in ( - "_block_timer:count", - "_block_ru_utime:count", - "_block_ru_stime:count", - "_block_db_txn_count:count", - "_block_db_txn_duration:count", - ) - ) -) - -block_timer = metrics.register_counter( - "block_time_seconds", - labels=["block_name"], - alternative_names=( - metrics.name_prefix + "_block_timer:total", - ), -) - -block_ru_utime = metrics.register_counter( - "block_ru_utime_seconds", labels=["block_name"], - alternative_names=( - metrics.name_prefix + "_block_ru_utime:total", - ), -) - -block_ru_stime = metrics.register_counter( - "block_ru_stime_seconds", labels=["block_name"], - alternative_names=( - metrics.name_prefix + "_block_ru_stime:total", - ), -) - -block_db_txn_count = metrics.register_counter( - "block_db_txn_count", labels=["block_name"], - alternative_names=( - metrics.name_prefix + "_block_db_txn_count:total", - ), -) +block_timer = Counter("synapse_util_metrics_block_time_seconds", "", ["block_name"]) + +block_ru_utime = Counter("synapse_util_metrics_block_ru_utime_seconds", "", ["block_name"]) + +block_ru_stime = Counter("synapse_util_metrics_block_ru_stime_seconds", "", ["block_name"]) + +block_db_txn_count = Counter("synapse_util_metrics_block_db_txn_count", "", ["block_name"]) # seconds spent waiting for db txns, excluding scheduling time, in this block -block_db_txn_duration = metrics.register_counter( - "block_db_txn_duration_seconds", labels=["block_name"], - alternative_names=( - metrics.name_prefix + "_block_db_txn_duration:total", - ), -) +block_db_txn_duration = Counter("synapse_util_metrics_block_db_txn_duration_seconds", "", ["block_name"]) # seconds spent waiting for a db connection, in this block -block_db_sched_duration = metrics.register_counter( - "block_db_sched_duration_seconds", labels=["block_name"], -) +block_db_sched_duration = Counter("synapse_util_metrics_block_db_sched_duration_seconds", "", ["block_name"]) def measure_func(name): @@ -132,8 +87,8 @@ class Measure(object): duration = self.clock.time_msec() - self.start - block_counter.inc(self.name) - block_timer.inc_by(duration, self.name) + block_counter.labels(self.name).inc() + 
block_timer.labels(self.name).inc(duration) context = LoggingContext.current_context() @@ -150,19 +105,13 @@ class Measure(object): ru_utime, ru_stime = context.get_resource_usage() - block_ru_utime.inc_by(ru_utime - self.ru_utime, self.name) - block_ru_stime.inc_by(ru_stime - self.ru_stime, self.name) - block_db_txn_count.inc_by( - context.db_txn_count - self.db_txn_count, self.name - ) - block_db_txn_duration.inc_by( - (context.db_txn_duration_ms - self.db_txn_duration_ms) / 1000., - self.name - ) - block_db_sched_duration.inc_by( - (context.db_sched_duration_ms - self.db_sched_duration_ms) / 1000., - self.name - ) + block_ru_utime.labels(self.name).inc(ru_utime - self.ru_utime) + block_ru_stime.labels(self.name).inc(ru_stime - self.ru_stime) + block_db_txn_count.labels(self.name).inc(context.db_txn_count - self.db_txn_count) + block_db_txn_duration.labels(self.name).inc( + (context.db_txn_duration_ms - self.db_txn_duration_ms) / 1000.) + block_db_sched_duration.labels(self.name).inc( + (context.db_sched_duration_ms - self.db_sched_duration_ms) / 1000.) if self.created_context: self.start_context.__exit__(exc_type, exc_val, exc_tb) diff --git a/tests/__init__.py b/tests/__init__.py index bfebb0f644..aab20e8e02 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -12,3 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +from twisted.trial import util +util.DEFAULT_TIMEOUT_DURATION = 10 -- cgit 1.5.1 From 020377a5504602cb5d2ce555c77bd18eefc87635 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 22 May 2018 11:16:07 +0100 Subject: Fix logcontext resource usage tracking --- synapse/util/logcontext.py | 38 +++++++++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 11 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index eab9d57650..0bbda1dcaf 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -60,7 +60,7 @@ class LoggingContext(object): __slots__ = [ "previous_context", "name", "ru_stime", "ru_utime", "db_txn_count", "db_txn_duration_ms", "db_sched_duration_ms", - "usage_start", "usage_end", + "usage_start", "main_thread", "alive", "request", "tag", ] @@ -109,8 +109,10 @@ class LoggingContext(object): # ms spent waiting for db txns to be scheduled self.db_sched_duration_ms = 0 + # If alive has the thread resource usage when the logcontext last + # became active. self.usage_start = None - self.usage_end = None + self.main_thread = threading.current_thread() self.request = None self.tag = "" @@ -159,7 +161,7 @@ class LoggingContext(object): """Restore the logging context in thread local storage to the state it was before this context was entered. Returns: - None to avoid suppressing any exeptions that were thrown. + None to avoid suppressing any exceptions that were thrown. 
""" current = self.set_current_context(self.previous_context) if current is not self: @@ -185,29 +187,43 @@ class LoggingContext(object): def start(self): if threading.current_thread() is not self.main_thread: + logger.warning("Started logcontext %s on different thread", self) return - if self.usage_start and self.usage_end: - self.ru_utime += self.usage_end.ru_utime - self.usage_start.ru_utime - self.ru_stime += self.usage_end.ru_stime - self.usage_start.ru_stime - self.usage_start = None - self.usage_end = None - + # If we haven't already started record the thread resource usage so + # far if not self.usage_start: self.usage_start = get_thread_resource_usage() def stop(self): if threading.current_thread() is not self.main_thread: + logger.warning("Stopped logcontext %s on different thread", self) return + # When we stop lets record the resource used since we started if self.usage_start: - self.usage_end = get_thread_resource_usage() + usage_end = get_thread_resource_usage() + + self.ru_utime += usage_end.ru_utime - self.usage_start.ru_utime + self.ru_stime += usage_end.ru_stime - self.usage_start.ru_stime + + self.usage_start = None + else: + logger.warning("Called stop on logcontext %s without calling start", self) def get_resource_usage(self): + """Get CPU time used by this logcontext so far. + + Returns: + tuple[float, float]: The user and system CPU usage in seconds + """ ru_utime = self.ru_utime ru_stime = self.ru_stime - if self.usage_start and threading.current_thread() is self.main_thread: + # If we are on the correct thread and we're currently running then we + # can include resource usage so far. + is_main_thread = threading.current_thread() is self.main_thread + if self.alive and self.usage_start and is_main_thread: current = get_thread_resource_usage() ru_utime += current.ru_utime - self.usage_start.ru_utime ru_stime += current.ru_stime - self.usage_start.ru_stime -- cgit 1.5.1 From 7948ecf2342d7053a6ff6daa01268b9331e1c569 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 22 May 2018 11:39:43 +0100 Subject: Comment --- synapse/util/logcontext.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/util') diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 0bbda1dcaf..914f616312 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -200,7 +200,7 @@ class LoggingContext(object): logger.warning("Stopped logcontext %s on different thread", self) return - # When we stop lets record the resource used since we started + # When we stop, let's record the resource used since we started if self.usage_start: usage_end = get_thread_resource_usage() -- cgit 1.5.1 From 85ba83eb5100abf02cf373d9a8d5010526facd45 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 22 May 2018 16:28:23 -0500 Subject: fixes --- synapse/app/homeserver.py | 6 +++-- synapse/federation/transaction_queue.py | 6 ++--- synapse/metrics/__init__.py | 12 ++++++++-- synapse/notifier.py | 6 ++--- synapse/push/httppusher.py | 4 ++-- synapse/util/caches/__init__.py | 40 ++++++++++++++++++++++++--------- synapse/util/caches/descriptors.py | 2 +- 7 files changed, 52 insertions(+), 24 deletions(-) (limited to 'synapse/util') diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index a5b135193f..449bfacdb9 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -34,6 +34,7 @@ from synapse.module_api import ModuleApi from synapse.http.additional_resource import AdditionalResource from synapse.http.server import RootRedirect 
from synapse.http.site import SynapseSite +from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, \ check_requirements @@ -60,6 +61,8 @@ from twisted.web.resource import EncodingResourceWrapper, NoResource from twisted.web.server import GzipEncoderFactory from twisted.web.static import File +from prometheus_client.twisted import MetricsResource + logger = logging.getLogger("synapse.app.homeserver") @@ -229,8 +232,7 @@ class SynapseHomeServer(HomeServer): resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self) if name == "metrics" and self.get_config().enable_metrics: - from prometheus_client.twisted import MetricsResource - resources[METRICS_PREFIX] = MetricsResource() + resources[METRICS_PREFIX] = MetricsResource(RegistryProxy()) if name == "replication": resources[REPLICATION_PREFIX] = ReplicationRestResource(self) diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 778924a13c..2049351fdd 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -64,7 +64,7 @@ class TransactionQueue(object): # done self.pending_transactions = {} - LaterGauge("pending_destinations", "", [], + LaterGauge("synapse_federation_client_pending_destinations", "", [], lambda: len(self.pending_transactions), ) @@ -89,11 +89,11 @@ class TransactionQueue(object): self.pending_edus_keyed_by_dest = edus_keyed = {} LaterGauge( - "pending_pdus", "", [], + "synapse_federation_client_pending_pdus", "", [], lambda: sum(map(len, pdus.values())), ) LaterGauge( - "pending_edus", "", [], + "synapse_federation_client_pending_edus", "", [], lambda: ( sum(map(len, edus.values())) + sum(map(len, presence.values())) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index ab0b921497..38408efb54 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -29,12 +29,20 @@ from twisted.internet import reactor logger = logging.getLogger(__name__) - running_on_pypy = platform.python_implementation() == 'PyPy' all_metrics = [] all_collectors = [] all_gauges = {} + +class RegistryProxy(object): + + def collect(self): + for metric in REGISTRY.collect(): + if not metric.name.startswith("__"): + yield metric + + @attr.s(hash=True) class LaterGauge(object): @@ -45,7 +53,7 @@ class LaterGauge(object): def collect(self): - g = GaugeMetricFamily(self.name, self.desc, self.labels) + g = GaugeMetricFamily(self.name, self.desc, labels=self.labels) try: calls = self.caller() diff --git a/synapse/notifier.py b/synapse/notifier.py index 123e6f1840..40cc553918 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -194,14 +194,14 @@ class Notifier(object): all_user_streams.add(x) return sum(stream.count_listeners() for stream in all_user_streams) - LaterGauge("listeners", "", [], count_listeners) + LaterGauge("synapse_notifier_listeners", "", [], count_listeners) LaterGauge( - "rooms", "", [], + "synapse_notifier_rooms", "", [], lambda: count(bool, self.room_to_user_streams.values()), ) LaterGauge( - "users", "", [], + "synapse_notifier_users", "", [], lambda: len(self.user_to_user_stream), ) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index e22088ad6f..bf7ff74a1a 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -28,9 +28,9 @@ from prometheus_client import Counter logger = logging.getLogger(__name__) -http_push_processed_counter = 
Counter("http_pushes_processed", "") +http_push_processed_counter = Counter("synapse_http_httppusher_http_pushes_processed", "") -http_push_failed_counter = Counter("http_pushes_failed", "") +http_push_failed_counter = Counter("synapse_http_httppusher_http_pushes_failed", "") class HttpPusher(object): diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 438dcddf55..1c511a7072 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from prometheus_client.core import GaugeMetricFamily, REGISTRY +from prometheus_client.core import Gauge, REGISTRY, GaugeMetricFamily import os @@ -22,10 +22,20 @@ CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.5)) caches_by_name = {} collectors_by_name = {} -def register_cache(name, cache_name, cache): +cache_size = Gauge("synapse_util_caches_cache:size", "", ["name"]) +cache_hits = Gauge("synapse_util_caches_cache:hits", "", ["name"]) +cache_evicted = Gauge("synapse_util_caches_cache:evicted_size", "", ["name"]) +cache_total = Gauge("synapse_util_caches_cache:total", "", ["name"]) + +response_cache_size = Gauge("synapse_util_caches_response_cache:size", "", ["name"]) +response_cache_hits = Gauge("synapse_util_caches_response_cache:hits", "", ["name"]) +response_cache_evicted = Gauge("synapse_util_caches_response_cache:evicted_size", "", ["name"]) +response_cache_total = Gauge("synapse_util_caches_response_cache:total", "", ["name"]) + +def register_cache(cache_type, cache_name, cache): # Check if the metric is already registered. Unregister it, if so. - metric_name = "synapse_util_caches_%s:%s" % (name, cache_name,) + metric_name = "cache_%s_%s" % (cache_type, cache_name,) if metric_name in collectors_by_name.keys(): REGISTRY.unregister(collectors_by_name[metric_name]) @@ -44,15 +54,22 @@ def register_cache(name, cache_name, cache): def inc_evictions(self, size=1): self.evicted_size += size - def collect(self): - cache_size = len(cache) + def describe(self): + return [] - gm = GaugeMetricFamily(metric_name, "", labels=["size", "hits", "misses", "total"]) - gm.add_metric(["size"], cache_size) - gm.add_metric(["hits"], self.hits) - gm.add_metric(["misses"], self.misses) - gm.add_metric(["total"], self.hits + self.misses) - yield gm + def collect(self): + if cache_type == "response_cache": + response_cache_size.labels(cache_name).set(len(cache)) + response_cache_hits.labels(cache_name).set(self.hits) + response_cache_evicted.labels(cache_name).set(self.evicted_size) + response_cache_total.labels(cache_name).set(self.hits + self.misses) + else: + cache_size.labels(cache_name).set(len(cache)) + cache_hits.labels(cache_name).set(self.hits) + cache_evicted.labels(cache_name).set(self.evicted_size) + cache_total.labels(cache_name).set(self.hits + self.misses) + + yield GaugeMetricFamily("__unused", "") metric = CacheMetric() REGISTRY.register(metric) @@ -60,6 +77,7 @@ def register_cache(name, cache_name, cache): collectors_by_name[metric_name] = metric return metric + KNOWN_KEYS = { key: key for key in ( diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index a4188eb099..8a9dcb2fc2 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -80,7 +80,7 @@ class Cache(object): self.name = name self.keylen = keylen self.thread = None - self.metrics = register_cache("descriptor", name, self.cache) + 
self.metrics = register_cache("cache", name, self.cache) def _on_evicted(self, evicted_count): self.metrics.inc_evictions(evicted_count) -- cgit 1.5.1 From 071206304d088aac8bd0e2fff600141dae1d09b6 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 22 May 2018 16:54:22 -0500 Subject: cleanup pep8 errors --- synapse/federation/federation_server.py | 5 +- synapse/federation/transaction_queue.py | 22 ++++-- synapse/http/request_metrics.py | 114 ++++++++++++++++++++++++-------- synapse/notifier.py | 3 +- synapse/util/caches/__init__.py | 7 +- 5 files changed, 114 insertions(+), 37 deletions(-) (limited to 'synapse/util') diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 8211273006..2d420a58a2 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -46,10 +46,13 @@ received_pdus_counter = Counter("synapse_federation_server_received_pdus", "") received_edus_counter = Counter("synapse_federation_server_received_edus", "") -received_queries_counter = Counter("synapse_federation_server_received_queries", "", ["type"]) +received_queries_counter = Counter( + "synapse_federation_server_received_queries", "", ["type"] +) class FederationServer(FederationBase): + def __init__(self, hs): super(FederationServer, self).__init__(hs) diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 2049351fdd..53442688c8 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -28,7 +28,10 @@ from synapse.handlers.presence import format_user_presence_state, get_interested import synapse.metrics from synapse.metrics import LaterGauge from synapse.metrics import ( - sent_edus_counter, sent_transactions_counter, events_processed_counter) + sent_edus_counter, + sent_transactions_counter, + events_processed_counter, +) from prometheus_client import Counter @@ -37,7 +40,9 @@ import logging logger = logging.getLogger(__name__) -sent_pdus_destination_dist = Counter("synapse_federation_client_sent_pdu_destinations", "") +sent_pdus_destination_dist = Counter( + "synapse_federation_client_sent_pdu_destinations", "" +) class TransactionQueue(object): @@ -64,7 +69,10 @@ class TransactionQueue(object): # done self.pending_transactions = {} - LaterGauge("synapse_federation_client_pending_destinations", "", [], + LaterGauge( + "synapse_federation_client_pending_destinations", + "", + [], lambda: len(self.pending_transactions), ) @@ -89,11 +97,15 @@ class TransactionQueue(object): self.pending_edus_keyed_by_dest = edus_keyed = {} LaterGauge( - "synapse_federation_client_pending_pdus", "", [], + "synapse_federation_client_pending_pdus", + "", + [], lambda: sum(map(len, pdus.values())), ) LaterGauge( - "synapse_federation_client_pending_edus", "", [], + "synapse_federation_client_pending_edus", + "", + [], lambda: ( sum(map(len, edus.values())) + sum(map(len, presence.values())) diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index e7f1bfc4ae..7f11b5c5a4 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -25,47 +25,87 @@ logger = logging.getLogger(__name__) # total number of responses served, split by method/servlet/tag -response_count = Counter("synapse_http_server_response_count", "", ["method", "servlet", "tag"]) +response_count = Counter( + "synapse_http_server_response_count", "", ["method", "servlet", "tag"] +) -requests_counter = Counter("synapse_http_server_requests_received", 
"", ["method", "servlet"]) +requests_counter = Counter( + "synapse_http_server_requests_received", "", ["method", "servlet"] +) -outgoing_responses_counter = Counter("synapse_http_server_responses", "", ["method", "code"]) +outgoing_responses_counter = Counter( + "synapse_http_server_responses", "", ["method", "code"] +) -response_timer = Histogram("synapse_http_server_response_time_seconds", "", ["method", "servlet", "tag"]) +response_timer = Histogram( + "synapse_http_server_response_time_seconds", "", ["method", "servlet", "tag"] +) -response_ru_utime = Counter("synapse_http_server_response_ru_utime_seconds", "", ["method", "servlet", "tag"]) +response_ru_utime = Counter( + "synapse_http_server_response_ru_utime_seconds", "", ["method", "servlet", "tag"] +) -response_ru_stime = Counter("synapse_http_server_response_ru_stime_seconds", "", ["method", "servlet", "tag"]) +response_ru_stime = Counter( + "synapse_http_server_response_ru_stime_seconds", "", ["method", "servlet", "tag"] +) -response_db_txn_count = Counter("synapse_http_server_response_db_txn_count", "", ["method", "servlet", "tag"]) +response_db_txn_count = Counter( + "synapse_http_server_response_db_txn_count", "", ["method", "servlet", "tag"] +) # seconds spent waiting for db txns, excluding scheduling time, when processing # this request -response_db_txn_duration = Counter("synapse_http_server_response_db_txn_duration_seconds", "", ["method", "servlet", "tag"]) +response_db_txn_duration = Counter( + "synapse_http_server_response_db_txn_duration_seconds", + "", + ["method", "servlet", "tag"], +) # seconds spent waiting for a db connection, when processing this request -response_db_sched_duration = Counter("synapse_http_request_response_db_sched_duration_seconds", "", ["method", "servlet", "tag"] +response_db_sched_duration = Counter( + "synapse_http_request_response_db_sched_duration_seconds", + "", + ["method", "servlet", "tag"], ) # size in bytes of the response written -response_size = Counter("synapse_http_request_response_size", "", ["method", "servlet", "tag"] +response_size = Counter( + "synapse_http_request_response_size", "", ["method", "servlet", "tag"] ) # In flight metrics are incremented while the requests are in flight, rather # than when the response was written. 
-in_flight_requests_ru_utime = Counter("synapse_http_request_in_flight_requests_ru_utime_seconds", "", ["method", "servlet"]) +in_flight_requests_ru_utime = Counter( + "synapse_http_request_in_flight_requests_ru_utime_seconds", + "", + ["method", "servlet"], +) -in_flight_requests_ru_stime = Counter("synapse_http_request_in_flight_requests_ru_stime_seconds", "", ["method", "servlet"]) +in_flight_requests_ru_stime = Counter( + "synapse_http_request_in_flight_requests_ru_stime_seconds", + "", + ["method", "servlet"], +) -in_flight_requests_db_txn_count = Counter("synapse_http_request_in_flight_requests_db_txn_count", "", ["method", "servlet"]) +in_flight_requests_db_txn_count = Counter( + "synapse_http_request_in_flight_requests_db_txn_count", "", ["method", "servlet"] +) # seconds spent waiting for db txns, excluding scheduling time, when processing # this request -in_flight_requests_db_txn_duration = Counter("synapse_http_request_in_flight_requests_db_txn_duration_seconds", "", ["method", "servlet"]) +in_flight_requests_db_txn_duration = Counter( + "synapse_http_request_in_flight_requests_db_txn_duration_seconds", + "", + ["method", "servlet"], +) # seconds spent waiting for a db connection, when processing this request -in_flight_requests_db_sched_duration = Counter("synapse_http_request_in_flight_requests_db_sched_duration_seconds", "", ["method", "servlet"]) +in_flight_requests_db_sched_duration = Counter( + "synapse_http_request_in_flight_requests_db_sched_duration_seconds", + "", + ["method", "servlet"], +) # The set of all in flight requests, set[RequestMetrics] _in_flight_requests = set() @@ -91,9 +131,10 @@ def _get_in_flight_counts(): LaterGauge( - "synapse_http_request_metrics_in_flight_requests_count", "", + "synapse_http_request_metrics_in_flight_requests_count", + "", ["method", "servlet"], - _get_in_flight_counts + _get_in_flight_counts, ) @@ -128,16 +169,23 @@ class RequestMetrics(object): response_count.labels(request.method, self.name, tag).inc() - response_timer.labels(request.method, self.name, tag).observe(time_msec - self.start) + response_timer.labels(request.method, self.name, tag).observe( + time_msec - self.start + ) ru_utime, ru_stime = context.get_resource_usage() response_ru_utime.labels(request.method, self.name, tag).inc(ru_utime) response_ru_stime.labels(request.method, self.name, tag).inc(ru_stime) - response_db_txn_count.labels(request.method, self.name, tag).inc(context.db_txn_count) - response_db_txn_duration.labels(request.method, self.name, tag).inc(context.db_txn_duration_ms / 1000.) + response_db_txn_count.labels(request.method, self.name, tag).inc( + context.db_txn_count + ) + response_db_txn_duration.labels(request.method, self.name, tag).inc( + context.db_txn_duration_ms / 1000. + ) response_db_sched_duration.labels(request.method, self.name, tag).inc( - context.db_sched_duration_ms / 1000.) + context.db_sched_duration_ms / 1000. + ) response_size.labels(request.method, self.name, tag).inc(request.sentLength) @@ -154,11 +202,17 @@ class RequestMetrics(object): in_flight_requests_ru_utime.labels(self.method, self.name).inc(diff.ru_utime) in_flight_requests_ru_stime.labels(self.method, self.name).inc(diff.ru_stime) - in_flight_requests_db_txn_count.labels(self.method, self.name).inc(diff.db_txn_count) + in_flight_requests_db_txn_count.labels(self.method, self.name).inc( + diff.db_txn_count + ) - in_flight_requests_db_txn_duration.labels(self.method, self.name).inc(diff.db_txn_duration_ms / 1000.) 
+ in_flight_requests_db_txn_duration.labels(self.method, self.name).inc( + diff.db_txn_duration_ms / 1000. + ) - in_flight_requests_db_sched_duration.labels(self.method, self.name).inc(diff.db_sched_duration_ms / 1000.) + in_flight_requests_db_sched_duration.labels(self.method, self.name).inc( + diff.db_sched_duration_ms / 1000. + ) class _RequestStats(object): @@ -166,12 +220,16 @@ class _RequestStats(object): """ __slots__ = [ - "ru_utime", "ru_stime", - "db_txn_count", "db_txn_duration_ms", "db_sched_duration_ms", + "ru_utime", + "ru_stime", + "db_txn_count", + "db_txn_duration_ms", + "db_sched_duration_ms", ] - def __init__(self, ru_utime, ru_stime, db_txn_count, - db_txn_duration_ms, db_sched_duration_ms): + def __init__( + self, ru_utime, ru_stime, db_txn_count, db_txn_duration_ms, db_sched_duration_ms + ): self.ru_utime = ru_utime self.ru_stime = ru_stime self.db_txn_count = db_txn_count diff --git a/synapse/notifier.py b/synapse/notifier.py index 40cc553918..6dce20a284 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -40,7 +40,8 @@ logger = logging.getLogger(__name__) notified_events_counter = Counter("synapse_notifier_notified_events", "") -users_woken_by_stream_counter = Counter("synapse_notifier_users_woken_by_stream", "", ["stream"]) +users_woken_by_stream_counter = Counter( + "synapse_notifier_users_woken_by_stream", "", ["stream"]) # TODO(paul): Should be shared somewhere diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 1c511a7072..e0c22df249 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -29,13 +29,16 @@ cache_total = Gauge("synapse_util_caches_cache:total", "", ["name"]) response_cache_size = Gauge("synapse_util_caches_response_cache:size", "", ["name"]) response_cache_hits = Gauge("synapse_util_caches_response_cache:hits", "", ["name"]) -response_cache_evicted = Gauge("synapse_util_caches_response_cache:evicted_size", "", ["name"]) +response_cache_evicted = Gauge( + "synapse_util_caches_response_cache:evicted_size", "", ["name"] +) response_cache_total = Gauge("synapse_util_caches_response_cache:total", "", ["name"]) + def register_cache(cache_type, cache_name, cache): # Check if the metric is already registered. Unregister it, if so. - metric_name = "cache_%s_%s" % (cache_type, cache_name,) + metric_name = "cache_%s_%s" % (cache_type, cache_name) if metric_name in collectors_by_name.keys(): REGISTRY.unregister(collectors_by_name[metric_name]) -- cgit 1.5.1 From 53cc2cde1f609ec34a4ce6a7c678302c65ddfe53 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 22 May 2018 17:32:57 -0500 Subject: cleanup --- synapse/federation/send_queue.py | 5 ++--- synapse/handlers/appservice.py | 9 ++++++--- synapse/handlers/presence.py | 12 ++++++++---- synapse/http/client.py | 3 ++- synapse/http/matrixfederationclient.py | 6 ++++-- synapse/metrics/__init__.py | 28 ++++++++++++++++++++++------ synapse/push/bulk_push_rule_evaluator.py | 6 ++++-- synapse/storage/events.py | 9 ++++++--- synapse/util/metrics.py | 15 ++++++++++----- 9 files changed, 64 insertions(+), 29 deletions(-) (limited to 'synapse/util') diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index e6e1888f3a..c7ed465617 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -74,9 +74,8 @@ class FederationRemoteSendQueue(object): # lambda binds to the queue rather than to the name of the queue which # changes. ARGH. 
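The "ARGH" comment above is about Python's late-binding closures: a lambda captures the variable, not its current value, so a callback built in a loop would otherwise see only the final loop value. Passing the value through a function parameter, as the register() helper just below does with its queue argument, pins it. A two-line demonstration:

    # Every callback closes over the same loop variable, so all return "c"...
    callbacks = [lambda: name for name in ["a", "b", "c"]]
    print([cb() for cb in callbacks])  # ['c', 'c', 'c']

    # ...unless the current value is captured, e.g. as a default argument.
    callbacks = [lambda name=name: name for name in ["a", "b", "c"]]
    print([cb() for cb in callbacks])  # ['a', 'b', 'c']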
def register(name, queue): - LaterGauge("synapse_federation_send_queue_%s_size" % (queue_name,), "", - lambda: len(queue), - ) + LaterGauge("synapse_federation_send_queue_%s_size" % (queue_name,), + "", lambda: len(queue)) for queue_name in [ "presence_map", "presence_changed", "keyed_edu", "keyed_edu_changed", diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index a7345331af..d9f35a5dba 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -127,12 +127,15 @@ class ApplicationServicesHandler(object): now = self.clock.time_msec() ts = yield self.store.get_received_ts(events[-1].event_id) - synapse.metrics.event_processing_positions.labels("appservice_sender").set(upper_bound) + synapse.metrics.event_processing_positions.labels( + "appservice_sender").set(upper_bound) events_processed_counter.inc(len(events)) - synapse.metrics.event_processing_lag.labels("appservice_sender").set(now - ts) - synapse.metrics.event_processing_last_ts.labels("appservice_sender").set(ts) + synapse.metrics.event_processing_lag.labels( + "appservice_sender").set(now - ts) + synapse.metrics.event_processing_last_ts.labels( + "appservice_sender").set(ts) finally: self.is_processing = False diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 4ee87d5714..12939aa507 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -47,7 +47,8 @@ logger = logging.getLogger(__name__) notified_presence_counter = Counter("synapse_handler_presence_notified_presence", "") -federation_presence_out_counter = Counter("synapse_handler_presence_federation_presence_out", "") +federation_presence_out_counter = Counter( + "synapse_handler_presence_federation_presence_out", "") presence_updates_counter = Counter("synapse_handler_presence_presence_updates", "") timers_fired_counter = Counter("synapse_handler_presence_timers_fired", "") federation_presence_counter = Counter("synapse_handler_presence_federation_presence", "") @@ -55,8 +56,10 @@ bump_active_time_counter = Counter("synapse_handler_presence_bump_active_time", get_updates_counter = Counter("synapse_handler_presence_get_updates", "", ["type"]) -notify_reason_counter = Counter("synapse_handler_presence_notify_reason", "", ["reason"]) -state_transition_counter = Counter("synapse_handler_presence_state_transition", "", ["from", "to"] +notify_reason_counter = Counter( + "synapse_handler_presence_notify_reason", "", ["reason"]) +state_transition_counter = Counter( + "synapse_handler_presence_state_transition", "", ["from", "to"] ) @@ -213,7 +216,8 @@ class PresenceHandler(object): 60 * 1000, ) - LaterGauge("synapse_handlers_presence_wheel_timer_size", "", [], lambda: len(self.wheel_timer)) + LaterGauge("synapse_handlers_presence_wheel_timer_size", "", [], + lambda: len(self.wheel_timer)) @defer.inlineCallbacks def _on_shutdown(self): diff --git a/synapse/http/client.py b/synapse/http/client.py index 61a1d2e2b3..4d4eee3d64 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -50,7 +50,8 @@ import urllib logger = logging.getLogger(__name__) outgoing_requests_counter = Counter("synapse_http_client_requests", "", ["method"]) -incoming_responses_counter = Counter("synapse_http_client_responses", "", ["method", "code"]) +incoming_responses_counter = Counter("synapse_http_client_responses", "", + ["method", "code"]) class SimpleHttpClient(object): diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 259d3884e2..77eaa06a1a 100644 
--- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -48,8 +48,10 @@ from prometheus_client import Counter logger = logging.getLogger(__name__) outbound_logger = logging.getLogger("synapse.http.outbound") -outgoing_requests_counter = Counter("synapse_http_matrixfederationclient_requests", "", ["method"]) -incoming_responses_counter = Counter("synapse_http_matrixfederationclient_responses", "", ["method", "code"]) +outgoing_requests_counter = Counter("synapse_http_matrixfederationclient_requests", + "", ["method"]) +incoming_responses_counter = Counter("synapse_http_matrixfederationclient_responses", + "", ["method", "code"]) MAX_LONG_RETRIES = 10 diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 38408efb54..bed37b5f56 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -21,15 +21,14 @@ import platform import attr from prometheus_client import Gauge, Histogram, Counter -from prometheus_client.core import ( - GaugeMetricFamily, CounterMetricFamily, REGISTRY) +from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily, REGISTRY from twisted.internet import reactor logger = logging.getLogger(__name__) -running_on_pypy = platform.python_implementation() == 'PyPy' +running_on_pypy = platform.python_implementation() == "PyPy" all_metrics = [] all_collectors = [] all_gauges = {} @@ -87,9 +86,16 @@ class LaterGauge(object): # gc_unreachable = Gauge("python_gc_unreachable_total", "Unreachable GC objects", ["gen"]) -gc_time = Histogram("python_gc_time", "Time taken to GC (ms)", ["gen"], buckets=[1, 2, 5, 10, 25, 50, 100, 250, 500, 1000]) +gc_time = Histogram( + "python_gc_time", + "Time taken to GC (ms)", + ["gen"], + buckets=[1, 2, 5, 10, 25, 50, 100, 250, 500, 1000], +) + class GCCounts(object): + def collect(self): gc_counts = gc.get_count() @@ -99,14 +105,23 @@ class GCCounts(object): yield cm + REGISTRY.register(GCCounts()) # # Twisted reactor metrics # -tick_time = Histogram("python_twisted_reactor_tick_time", "Tick time of the Twisted reactor (ms)", buckets=[1, 2, 5, 10, 50, 100, 250, 500, 1000, 2000]) -pending_calls_metric = Histogram("python_twisted_reactor_pending_calls", "Pending calls", buckets=[1, 2, 5, 10, 25, 50, 100, 250, 500, 1000]) +tick_time = Histogram( + "python_twisted_reactor_tick_time", + "Tick time of the Twisted reactor (ms)", + buckets=[1, 2, 5, 10, 50, 100, 250, 500, 1000, 2000], +) +pending_calls_metric = Histogram( + "python_twisted_reactor_pending_calls", + "Pending calls", + buckets=[1, 2, 5, 10, 25, 50, 100, 250, 500, 1000], +) # # Federation Metrics @@ -134,6 +149,7 @@ event_processing_last_ts = Gauge("synapse_event_processing_last_ts", "", ["name" # finished being processed. 
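# The GCCounts class above uses prometheus_client's custom-collector hook:
# any object with a collect() method that yields metric families can be
# registered. A standalone sketch of the same pattern (hypothetical metric
# name, assuming prometheus_client is installed):
import gc

from prometheus_client.core import GaugeMetricFamily, REGISTRY

class ExampleGCCollector(object):
    def collect(self):
        cm = GaugeMetricFamily("example_gc_counts", "GC counts", labels=["gen"])
        for n, count in enumerate(gc.get_count()):
            cm.add_metric([str(n)], count)
        yield cm

REGISTRY.register(ExampleGCCollector())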
event_processing_lag = Gauge("synapse_event_processing_lag", "", ["name"]) + def runUntilCurrentTimer(func): @functools.wraps(func) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 6fcca5e260..b0053e7f3f 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -36,8 +36,10 @@ logger = logging.getLogger(__name__) rules_by_room = {} -push_rules_invalidation_counter = Counter("synapse_push_bulk_push_role_evaluator_push_rules_invalidation_counter", "") -push_rules_state_size_counter = Counter("synapse_push_bulk_push_role_evaluator_push_rules_state_size_counter", "") +push_rules_invalidation_counter = Counter( + "synapse_push_bulk_push_role_evaluator_push_rules_invalidation_counter", "") +push_rules_state_size_counter = Counter( + "synapse_push_bulk_push_role_evaluator_push_rules_state_size_counter", "") # Measures whether we use the fast path of using state deltas, or if we have to # recalculate from scratch diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 00d66886ad..b96104ccae 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -45,19 +45,22 @@ from prometheus_client import Counter logger = logging.getLogger(__name__) persist_event_counter = Counter("synapse_storage_events_persisted_events", "") -event_counter = Counter("synapse_storage_events_persisted_events_sep", "", ["type", "origin_type", "origin_entity"]) +event_counter = Counter("synapse_storage_events_persisted_events_sep", "", + ["type", "origin_type", "origin_entity"]) # The number of times we are recalculating the current state state_delta_counter = Counter("synapse_storage_events_state_delta", "") # The number of times we are recalculating state when there is only a # single forward extremity -state_delta_single_event_counter = Counter("synapse_storage_events_state_delta_single_event", "") +state_delta_single_event_counter = Counter( + "synapse_storage_events_state_delta_single_event", "") # The number of times we are reculating state when we could have resonably # calculated the delta when we calculated the state for an event we were # persisting. 
-state_delta_reuse_delta_counter = Counter("synapse_storage_events_state_delta_reuse_delta", "")
+state_delta_reuse_delta_counter = Counter(
+    "synapse_storage_events_state_delta_reuse_delta", "")


 def encode_json(json_object):
diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py
index a964286d85..424fdcb036 100644
--- a/synapse/util/metrics.py
+++ b/synapse/util/metrics.py
@@ -28,17 +28,22 @@ block_counter = Counter("synapse_util_metrics_block_count", "", ["block_name"])

 block_timer = Counter("synapse_util_metrics_block_time_seconds", "", ["block_name"])

-block_ru_utime = Counter("synapse_util_metrics_block_ru_utime_seconds", "", ["block_name"])
+block_ru_utime = Counter(
+    "synapse_util_metrics_block_ru_utime_seconds", "", ["block_name"])

-block_ru_stime = Counter("synapse_util_metrics_block_ru_stime_seconds", "", ["block_name"])
+block_ru_stime = Counter(
+    "synapse_util_metrics_block_ru_stime_seconds", "", ["block_name"])

-block_db_txn_count = Counter("synapse_util_metrics_block_db_txn_count", "", ["block_name"])
+block_db_txn_count = Counter(
+    "synapse_util_metrics_block_db_txn_count", "", ["block_name"])

 # seconds spent waiting for db txns, excluding scheduling time, in this block
-block_db_txn_duration = Counter("synapse_util_metrics_block_db_txn_duration_seconds", "", ["block_name"])
+block_db_txn_duration = Counter(
+    "synapse_util_metrics_block_db_txn_duration_seconds", "", ["block_name"])

 # seconds spent waiting for a db connection, in this block
-block_db_sched_duration = Counter("synapse_util_metrics_block_db_sched_duration_seconds", "", ["block_name"])
+block_db_sched_duration = Counter(
+    "synapse_util_metrics_block_db_sched_duration_seconds", "", ["block_name"])


 def measure_func(name):
-- cgit 1.5.1

From dd068ca9792e7f4a51690b91262131b5dae80455 Mon Sep 17 00:00:00 2001
From: Adrian Tschira Date: Thu, 24 May 2018 20:52:56 +0200 Subject: remaining isinstance fixes

Signed-off-by: Adrian Tschira
---
 synapse/storage/search.py | 5 +++--
 synapse/util/caches/descriptors.py | 12 ++++++++----
 synapse/util/frozenutils.py | 5 +++--
 3 files changed, 14 insertions(+), 8 deletions(-)

(limited to 'synapse/util')

diff --git a/synapse/storage/search.py b/synapse/storage/search.py
index 6ba3e59889..a9c299a861 100644
--- a/synapse/storage/search.py
+++ b/synapse/storage/search.py
@@ -18,13 +18,14 @@ import logging
 import re
 import simplejson as json

+from six import string_types
+
 from twisted.internet import defer

 from .background_updates import BackgroundUpdateStore
 from synapse.api.errors import SynapseError
 from synapse.storage.engines import PostgresEngine, Sqlite3Engine
-
 logger = logging.getLogger(__name__)

 SearchEntry = namedtuple('SearchEntry', [
@@ -126,7 +127,7 @@ class SearchStore(BackgroundUpdateStore):
             # skip over it.
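# The change just below replaces the py2-only basestring check with
# six.string_types. A standalone sketch of the difference (illustrative
# values, not part of this patch):
from six import string_types

def is_stringy(value):
    # On py2 this matches str and unicode; on py3 it is just (str,).
    return isinstance(value, string_types)

print(is_stringy(u"body"))   # True on both py2 and py3
print(is_stringy(42))        # False on both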
continue - if not isinstance(value, basestring): + if not isinstance(value, string_types): # If the event body, name or topic isn't a string # then skip over it continue diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 68285a7594..b595cd6164 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -31,6 +31,9 @@ import functools import inspect import threading +from six import string_types, itervalues +import six + logger = logging.getLogger(__name__) @@ -205,7 +208,7 @@ class Cache(object): def invalidate_all(self): self.check_thread() self.cache.clear() - for entry in self._pending_deferred_cache.itervalues(): + for entry in itervalues(self._pending_deferred_cache): entry.invalidate() self._pending_deferred_cache.clear() @@ -392,9 +395,10 @@ class CacheDescriptor(_CacheDescriptorBase): ret.addErrback(onErr) - # If our cache_key is a string, try to convert to ascii to save - # a bit of space in large caches - if isinstance(cache_key, basestring): + # If our cache_key is a string on py2, try to convert to ascii + # to save a bit of space in large caches. Py3 does this + # internally automatically. + if six.PY2 and isinstance(cache_key, string_types): cache_key = to_ascii(cache_key) result_d = ObservableDeferred(ret, consumeErrors=True) diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py index f497b51f4a..4cd0566f4f 100644 --- a/synapse/util/frozenutils.py +++ b/synapse/util/frozenutils.py @@ -16,6 +16,7 @@ from frozendict import frozendict import simplejson as json +from six import string_types def freeze(o): t = type(o) @@ -25,7 +26,7 @@ def freeze(o): if t is frozendict: return o - if t is str or t is unicode: + if isinstance(t, string_types): return o try: @@ -41,7 +42,7 @@ def unfreeze(o): if t is dict or t is frozendict: return dict({k: unfreeze(v) for k, v in o.items()}) - if t is str or t is unicode: + if isinstance(t, string_types): return o try: -- cgit 1.5.1 From 4ee4450d66c3c0230a4d81909220a0a94af8be8f Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Thu, 24 May 2018 21:44:10 +0200 Subject: fix recursion error --- synapse/util/frozenutils.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py index 4cd0566f4f..97914907aa 100644 --- a/synapse/util/frozenutils.py +++ b/synapse/util/frozenutils.py @@ -19,14 +19,13 @@ import simplejson as json from six import string_types def freeze(o): - t = type(o) - if t is dict: + if isinstance(o, dict): return frozendict({k: freeze(v) for k, v in o.items()}) - if t is frozendict: + if isinstance(o, frozendict): return o - if isinstance(t, string_types): + if isinstance(o, string_types): return o try: @@ -38,11 +37,10 @@ def freeze(o): def unfreeze(o): - t = type(o) - if t is dict or t is frozendict: + if isinstance(o, (dict, frozendict)): return dict({k: unfreeze(v) for k, v in o.items()}) - if isinstance(t, string_types): + if isinstance(o, string_types): return o try: -- cgit 1.5.1 From 357c74a50f1e1588a6a3d626bddb3555452c6f56 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 28 May 2018 19:14:41 +1000 Subject: add comment about why unreg --- synapse/util/caches/__init__.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'synapse/util') diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index d968b71e7a..183faf75a1 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ 
-41,6 +41,8 @@ response_cache_total = Gauge("synapse_util_caches_response_cache:total", "", ["n def register_cache(cache_type, cache_name, cache): # Check if the metric is already registered. Unregister it, if so. + # This usually happens during tests, as at runtime these caches are + # effectively singletons. metric_name = "cache_%s_%s" % (cache_type, cache_name) if metric_name in collectors_by_name.keys(): REGISTRY.unregister(collectors_by_name[metric_name]) -- cgit 1.5.1 From 3ef5cd74a6bff9b33144cb834782e0402e6eb152 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 28 May 2018 19:39:27 +1000 Subject: update to more consistently use seconds in any metrics or logging --- synapse/http/request_metrics.py | 38 +++++++++++++++++++------------------- synapse/util/logcontext.py | 24 ++++++++++++------------ synapse/util/logutils.py | 6 +++--- synapse/util/metrics.py | 8 ++++---- 4 files changed, 38 insertions(+), 38 deletions(-) (limited to 'synapse/util') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index af3067b4bb..dc06f6c443 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -139,8 +139,8 @@ LaterGauge( class RequestMetrics(object): - def start(self, time_msec, name, method): - self.start = time_msec + def start(self, time_sec, name, method): + self.start = time_sec self.start_context = LoggingContext.current_context() self.name = name self.method = method @@ -149,7 +149,7 @@ class RequestMetrics(object): _in_flight_requests.add(self) - def stop(self, time_msec, request): + def stop(self, time_sec, request): _in_flight_requests.discard(self) context = LoggingContext.current_context() @@ -170,7 +170,7 @@ class RequestMetrics(object): response_count.labels(request.method, self.name, tag).inc() response_timer.labels(request.method, self.name, tag).observe( - time_msec - self.start + time_sec - self.start ) ru_utime, ru_stime = context.get_resource_usage() @@ -181,10 +181,10 @@ class RequestMetrics(object): context.db_txn_count ) response_db_txn_duration.labels(request.method, self.name, tag).inc( - context.db_txn_duration_ms / 1000. + context.db_txn_duration_sec ) response_db_sched_duration.labels(request.method, self.name, tag).inc( - context.db_sched_duration_ms / 1000. + context.db_sched_duration_sec ) response_size.labels(request.method, self.name, tag).inc(request.sentLength) @@ -207,11 +207,11 @@ class RequestMetrics(object): ) in_flight_requests_db_txn_duration.labels(self.method, self.name).inc( - diff.db_txn_duration_ms / 1000. + diff.db_txn_duration_sec ) in_flight_requests_db_sched_duration.labels(self.method, self.name).inc( - diff.db_sched_duration_ms / 1000. 
+ diff.db_sched_duration_sec ) @@ -223,18 +223,18 @@ class _RequestStats(object): "ru_utime", "ru_stime", "db_txn_count", - "db_txn_duration_ms", - "db_sched_duration_ms", + "db_txn_duration_sec", + "db_sched_duration_sec", ] def __init__( - self, ru_utime, ru_stime, db_txn_count, db_txn_duration_ms, db_sched_duration_ms + self, ru_utime, ru_stime, db_txn_count, db_txn_duration_sec, db_sched_duration_sec ): self.ru_utime = ru_utime self.ru_stime = ru_stime self.db_txn_count = db_txn_count - self.db_txn_duration_ms = db_txn_duration_ms - self.db_sched_duration_ms = db_sched_duration_ms + self.db_txn_duration_sec = db_txn_duration_sec + self.db_sched_duration_sec = db_sched_duration_sec @staticmethod def from_context(context): @@ -243,8 +243,8 @@ class _RequestStats(object): return _RequestStats( ru_utime, ru_stime, context.db_txn_count, - context.db_txn_duration_ms, - context.db_sched_duration_ms, + context.db_txn_duration_sec, + context.db_sched_duration_sec, ) def update(self, context): @@ -260,14 +260,14 @@ class _RequestStats(object): new.ru_utime - self.ru_utime, new.ru_stime - self.ru_stime, new.db_txn_count - self.db_txn_count, - new.db_txn_duration_ms - self.db_txn_duration_ms, - new.db_sched_duration_ms - self.db_sched_duration_ms, + new.db_txn_duration_sec - self.db_txn_duration_sec, + new.db_sched_duration_sec - self.db_sched_duration_sec, ) self.ru_utime = new.ru_utime self.ru_stime = new.ru_stime self.db_txn_count = new.db_txn_count - self.db_txn_duration_ms = new.db_txn_duration_ms - self.db_sched_duration_ms = new.db_sched_duration_ms + self.db_txn_duration_sec = new.db_txn_duration_sec + self.db_sched_duration_sec = new.db_sched_duration_sec return diff diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 914f616312..a58c723403 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -59,7 +59,7 @@ class LoggingContext(object): __slots__ = [ "previous_context", "name", "ru_stime", "ru_utime", - "db_txn_count", "db_txn_duration_ms", "db_sched_duration_ms", + "db_txn_count", "db_txn_duration_sec", "db_sched_duration_sec", "usage_start", "main_thread", "alive", "request", "tag", @@ -84,10 +84,10 @@ class LoggingContext(object): def stop(self): pass - def add_database_transaction(self, duration_ms): + def add_database_transaction(self, duration_sec): pass - def add_database_scheduled(self, sched_ms): + def add_database_scheduled(self, sched_sec): pass def __nonzero__(self): @@ -103,11 +103,11 @@ class LoggingContext(object): self.ru_utime = 0. self.db_txn_count = 0 - # ms spent waiting for db txns, excluding scheduling time - self.db_txn_duration_ms = 0 + # sec spent waiting for db txns, excluding scheduling time + self.db_txn_duration_sec = 0 - # ms spent waiting for db txns to be scheduled - self.db_sched_duration_ms = 0 + # sec spent waiting for db txns to be scheduled + self.db_sched_duration_sec = 0 # If alive has the thread resource usage when the logcontext last # became active. 
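# A standalone miniature of the accounting pattern LoggingContext implements,
# with durations now tracked in seconds throughout (hypothetical class, not
# the real one):
class ExampleStats(object):
    def __init__(self):
        self.db_txn_count = 0
        self.db_txn_duration_sec = 0.

    def add_database_transaction(self, duration_sec):
        self.db_txn_count += 1
        self.db_txn_duration_sec += duration_sec

stats = ExampleStats()
stats.add_database_transaction(0.012)
stats.add_database_transaction(0.003)
print("%d txns, %.3f sec" % (stats.db_txn_count, stats.db_txn_duration_sec))
# 2 txns, 0.015 sec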
@@ -230,18 +230,18 @@ class LoggingContext(object): return ru_utime, ru_stime - def add_database_transaction(self, duration_ms): + def add_database_transaction(self, duration_sec): self.db_txn_count += 1 - self.db_txn_duration_ms += duration_ms + self.db_txn_duration_sec += duration_sec - def add_database_scheduled(self, sched_ms): + def add_database_scheduled(self, sched_sec): """Record a use of the database pool Args: - sched_ms (int): number of milliseconds it took us to get a + sched_sec (float): number of seconds it took us to get a connection """ - self.db_sched_duration_ms += sched_ms + self.db_sched_duration_sec += sched_sec class LoggingContextFilter(logging.Filter): diff --git a/synapse/util/logutils.py b/synapse/util/logutils.py index 3a83828d25..03249c5dc8 100644 --- a/synapse/util/logutils.py +++ b/synapse/util/logutils.py @@ -96,7 +96,7 @@ def time_function(f): id = _TIME_FUNC_ID _TIME_FUNC_ID += 1 - start = time.clock() * 1000 + start = time.clock() try: _log_debug_as_f( @@ -107,10 +107,10 @@ def time_function(f): r = f(*args, **kwargs) finally: - end = time.clock() * 1000 + end = time.clock() _log_debug_as_f( f, - "[FUNC END] {%s-%d} %f", + "[FUNC END] {%s-%d} %.3f sec", (func_name, id, end - start,), ) diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 424fdcb036..23fc0ca20d 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -74,7 +74,7 @@ class Measure(object): self.created_context = False def __enter__(self): - self.start = self.clock.time_msec() + self.start = self.clock.time() self.start_context = LoggingContext.current_context() if not self.start_context: self.start_context = LoggingContext("Measure") @@ -90,7 +90,7 @@ class Measure(object): if isinstance(exc_type, Exception) or not self.start_context: return - duration = self.clock.time_msec() - self.start + duration = self.clock.time() - self.start block_counter.labels(self.name).inc() block_timer.labels(self.name).inc(duration) @@ -114,9 +114,9 @@ class Measure(object): block_ru_stime.labels(self.name).inc(ru_stime - self.ru_stime) block_db_txn_count.labels(self.name).inc(context.db_txn_count - self.db_txn_count) block_db_txn_duration.labels(self.name).inc( - (context.db_txn_duration_ms - self.db_txn_duration_ms) / 1000.) + context.db_txn_duration_sec - self.db_txn_duration_sec) block_db_sched_duration.labels(self.name).inc( - (context.db_sched_duration_ms - self.db_sched_duration_ms) / 1000.) 
+ context.db_sched_duration_sec - self.db_sched_duration_sec) if self.created_context: self.start_context.__exit__(exc_type, exc_val, exc_tb) -- cgit 1.5.1 From 57ad76fa4a4fb76dc1b9c7e8232b9589b9731ff6 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 28 May 2018 19:51:53 +1000 Subject: fix up tests --- synapse/util/metrics.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 23fc0ca20d..1ba7d65c7c 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -62,7 +62,7 @@ class Measure(object): __slots__ = [ "clock", "name", "start_context", "start", "new_context", "ru_utime", "ru_stime", - "db_txn_count", "db_txn_duration_ms", "db_sched_duration_ms", + "db_txn_count", "db_txn_duration_sec", "db_sched_duration_sec", "created_context", ] @@ -83,8 +83,8 @@ class Measure(object): self.ru_utime, self.ru_stime = self.start_context.get_resource_usage() self.db_txn_count = self.start_context.db_txn_count - self.db_txn_duration_ms = self.start_context.db_txn_duration_ms - self.db_sched_duration_ms = self.start_context.db_sched_duration_ms + self.db_txn_duration_sec = self.start_context.db_txn_duration_sec + self.db_sched_duration_sec = self.start_context.db_sched_duration_sec def __exit__(self, exc_type, exc_val, exc_tb): if isinstance(exc_type, Exception) or not self.start_context: -- cgit 1.5.1 From 7873cde52658d53c3f25ea8257309543af148ab2 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Tue, 29 May 2018 17:35:55 +0200 Subject: pep8 --- synapse/util/frozenutils.py | 1 + 1 file changed, 1 insertion(+) (limited to 'synapse/util') diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py index 97914907aa..15f0a7ba9e 100644 --- a/synapse/util/frozenutils.py +++ b/synapse/util/frozenutils.py @@ -18,6 +18,7 @@ import simplejson as json from six import string_types + def freeze(o): if isinstance(o, dict): return frozendict({k: freeze(v) for k, v in o.items()}) -- cgit 1.5.1 From c936a52a9eb18e302fbd5158da7188f674912530 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 31 May 2018 19:03:47 +1000 Subject: Consistently use six's iteritems and wrap lazy keys/values in list() if they're not meant to be lazy (#3307) --- synapse/api/auth.py | 4 ++- synapse/api/filtering.py | 2 +- synapse/event_auth.py | 4 +-- synapse/events/__init__.py | 2 +- synapse/federation/federation_client.py | 4 +-- synapse/federation/send_queue.py | 2 +- synapse/federation/transaction_queue.py | 6 +++-- synapse/handlers/_base.py | 4 +-- synapse/handlers/appservice.py | 4 ++- synapse/handlers/auth.py | 6 ++--- synapse/handlers/device.py | 4 +-- synapse/handlers/federation.py | 17 ++++++------ synapse/handlers/presence.py | 12 ++++----- synapse/handlers/room.py | 2 +- synapse/handlers/room_list.py | 3 ++- synapse/handlers/search.py | 2 +- synapse/handlers/sync.py | 6 ++--- synapse/push/baserules.py | 2 +- synapse/push/mailer.py | 3 ++- synapse/push/presentable_names.py | 2 +- synapse/rest/client/transactions.py | 2 +- synapse/state.py | 3 ++- synapse/storage/events.py | 47 ++++++++++++++++++--------------- synapse/storage/presence.py | 7 ++--- synapse/storage/search.py | 4 +-- synapse/storage/state.py | 47 ++++++++++++++++++--------------- synapse/storage/user_directory.py | 8 +++--- synapse/util/caches/descriptors.py | 2 +- synapse/util/caches/treecache.py | 6 +++-- 29 files changed, 116 insertions(+), 101 deletions(-) (limited to 'synapse/util') diff --git a/synapse/api/auth.py 
b/synapse/api/auth.py index b052cf532b..06fa38366d 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -15,6 +15,8 @@ import logging +from six import itervalues + import pymacaroons from twisted.internet import defer @@ -66,7 +68,7 @@ class Auth(object): ) auth_events = yield self.store.get_events(auth_events_ids) auth_events = { - (e.type, e.state_key): e for e in auth_events.values() + (e.type, e.state_key): e for e in itervalues(auth_events) } self.check(event, auth_events=auth_events, do_sig_check=do_sig_check) diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index db43219d24..dbc0e7e445 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -411,7 +411,7 @@ class Filter(object): return room_ids def filter(self, events): - return filter(self.check, events) + return list(filter(self.check, events)) def limit(self): return self.filter_json.get("limit", 10) diff --git a/synapse/event_auth.py b/synapse/event_auth.py index cd5627e36a..eaf9cecde6 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -471,14 +471,14 @@ def _check_power_levels(event, auth_events): ] old_list = current_state.content.get("users", {}) - for user in set(old_list.keys() + user_list.keys()): + for user in set(list(old_list) + list(user_list)): levels_to_check.append( (user, "users") ) old_list = current_state.content.get("events", {}) new_list = event.content.get("events", {}) - for ev_id in set(old_list.keys() + new_list.keys()): + for ev_id in set(list(old_list) + list(new_list)): levels_to_check.append( (ev_id, "events") ) diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index c3ff85c49a..cb08da4984 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -146,7 +146,7 @@ class EventBase(object): return field in self._event_dict def items(self): - return self._event_dict.items() + return list(self._event_dict.items()) class FrozenEvent(EventBase): diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 2761ffae07..87a92f6ea9 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -391,7 +391,7 @@ class FederationClient(FederationBase): """ if return_local: seen_events = yield self.store.get_events(event_ids, allow_rejected=True) - signed_events = seen_events.values() + signed_events = list(seen_events.values()) else: seen_events = yield self.store.have_seen_events(event_ids) signed_events = [] @@ -589,7 +589,7 @@ class FederationClient(FederationBase): } valid_pdus = yield self._check_sigs_and_hash_and_fetch( - destination, pdus.values(), + destination, list(pdus.values()), outlier=True, ) diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index c7ed465617..3dcc629d44 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -197,7 +197,7 @@ class FederationRemoteSendQueue(object): # We only want to send presence for our own users, so lets always just # filter here just in case. 
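# Context for the list(filter(...)) change just below: on py3, filter()
# returns a lazy, single-pass iterator rather than a list (standalone
# sketch with toy data):
states = [1, 2, 3, 4]
evens = filter(lambda s: s % 2 == 0, states)
materialised = list(evens)
print(materialised)     # [2, 4]
print(list(evens))      # [] on py3 -- the iterator is already exhausted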
- local_states = filter(lambda s: self.is_mine_id(s.user_id), states) + local_states = list(filter(lambda s: self.is_mine_id(s.user_id), states)) self.presence_map.update({state.user_id: state for state in local_states}) self.presence_changed[pos] = [state.user_id for state in local_states] diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 69312ec233..f0aeb5a0d3 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -35,6 +35,8 @@ from synapse.metrics import ( from prometheus_client import Counter +from six import itervalues + import logging @@ -234,7 +236,7 @@ class TransactionQueue(object): yield logcontext.make_deferred_yieldable(defer.gatherResults( [ logcontext.run_in_background(handle_room_events, evs) - for evs in events_by_room.itervalues() + for evs in itervalues(events_by_room) ], consumeErrors=True )) @@ -325,7 +327,7 @@ class TransactionQueue(object): if not states_map: break - yield self._process_presence_inner(states_map.values()) + yield self._process_presence_inner(list(states_map.values())) except Exception: logger.exception("Error sending presence states to servers") finally: diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index e089e66fde..2d1db0c245 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -114,14 +114,14 @@ class BaseHandler(object): if guest_access != "can_join": if context: current_state = yield self.store.get_events( - context.current_state_ids.values() + list(context.current_state_ids.values()) ) else: current_state = yield self.state_handler.get_current_state( event.room_id ) - current_state = current_state.values() + current_state = list(current_state.values()) logger.info("maybe_kick_guest_users %r", current_state) yield self.kick_guest_users(current_state) diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index d9f35a5dba..1c29c43a83 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -15,6 +15,8 @@ from twisted.internet import defer +from six import itervalues + import synapse from synapse.api.constants import EventTypes from synapse.util.metrics import Measure @@ -119,7 +121,7 @@ class ApplicationServicesHandler(object): yield make_deferred_yieldable(defer.gatherResults([ run_in_background(handle_room_events, evs) - for evs in events_by_room.itervalues() + for evs in itervalues(events_by_room) ], consumeErrors=True)) yield self.store.set_appservice_last_pos(upper_bound) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index a5365c4fe4..3c0051586d 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -249,7 +249,7 @@ class AuthHandler(BaseHandler): errordict = e.error_dict() for f in flows: - if len(set(f) - set(creds.keys())) == 0: + if len(set(f) - set(creds)) == 0: # it's very useful to know what args are stored, but this can # include the password in the case of registering, so only log # the keys (confusingly, clientdict may contain a password @@ -257,12 +257,12 @@ class AuthHandler(BaseHandler): # and is not sensitive). logger.info( "Auth completed with creds: %r. 
Client dict has keys: %r", - creds, clientdict.keys() + creds, list(clientdict) ) defer.returnValue((creds, clientdict, session['id'])) ret = self._auth_dict_for_flows(flows, session) - ret['completed'] = creds.keys() + ret['completed'] = list(creds) ret.update(errordict) raise InteractiveAuthIncompleteError( ret, diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 31bd0e60c6..11c6fb3657 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -114,7 +114,7 @@ class DeviceHandler(BaseHandler): user_id, device_id=None ) - devices = device_map.values() + devices = list(device_map.values()) for device in devices: _update_device_from_client_ips(device, ips) @@ -187,7 +187,7 @@ class DeviceHandler(BaseHandler): defer.Deferred: """ device_map = yield self.store.get_devices_by_user(user_id) - device_ids = device_map.keys() + device_ids = list(device_map) if except_device_id is not None: device_ids = [d for d in device_ids if d != except_device_id] yield self.delete_devices(user_id, device_ids) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 87c0615820..fcf94befb7 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -52,7 +52,6 @@ from synapse.util.retryutils import NotRetryingDestination from synapse.util.distributor import user_joined_room - logger = logging.getLogger(__name__) @@ -480,8 +479,8 @@ class FederationHandler(BaseHandler): # to get all state ids that we're interested in. event_map = yield self.store.get_events([ e_id - for key_to_eid in event_to_state_ids.itervalues() - for key, e_id in key_to_eid.iteritems() + for key_to_eid in list(event_to_state_ids.values()) + for key, e_id in key_to_eid.items() if key[0] != EventTypes.Member or check_match(key[1]) ]) @@ -1149,13 +1148,13 @@ class FederationHandler(BaseHandler): user = UserID.from_string(event.state_key) yield user_joined_room(self.distributor, user, event.room_id) - state_ids = context.prev_state_ids.values() + state_ids = list(context.prev_state_ids.values()) auth_chain = yield self.store.get_auth_chain(state_ids) - state = yield self.store.get_events(context.prev_state_ids.values()) + state = yield self.store.get_events(list(context.prev_state_ids.values())) defer.returnValue({ - "state": state.values(), + "state": list(state.values()), "auth_chain": auth_chain, }) @@ -1405,7 +1404,7 @@ class FederationHandler(BaseHandler): else: del results[(event.type, event.state_key)] - res = results.values() + res = list(results.values()) for event in res: # We sign these again because there was a bug where we # incorrectly signed things the first time round @@ -1446,7 +1445,7 @@ class FederationHandler(BaseHandler): else: results.pop((event.type, event.state_key), None) - defer.returnValue(results.values()) + defer.returnValue(list(results.values())) else: defer.returnValue([]) @@ -1915,7 +1914,7 @@ class FederationHandler(BaseHandler): }) new_state = self.state_handler.resolve_events( - [local_view.values(), remote_view.values()], + [list(local_view.values()), list(remote_view.values())], event ) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 26fc0d3ec7..7fe568132f 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -325,7 +325,7 @@ class PresenceHandler(object): if to_notify: notified_presence_counter.inc(len(to_notify)) - yield self._persist_and_notify(to_notify.values()) + yield self._persist_and_notify(list(to_notify.values())) self.unpersisted_users_changes |= 
set(s.user_id for s in new_states) self.unpersisted_users_changes -= set(to_notify.keys()) @@ -687,7 +687,7 @@ class PresenceHandler(object): """ updates = yield self.current_state_for_users(target_user_ids) - updates = updates.values() + updates = list(updates.values()) for user_id in set(target_user_ids) - set(u.user_id for u in updates): updates.append(UserPresenceState.default(user_id)) @@ -753,11 +753,11 @@ class PresenceHandler(object): self._push_to_remotes([state]) else: user_ids = yield self.store.get_users_in_room(room_id) - user_ids = filter(self.is_mine_id, user_ids) + user_ids = list(filter(self.is_mine_id, user_ids)) states = yield self.current_state_for_users(user_ids) - self._push_to_remotes(states.values()) + self._push_to_remotes(list(states.values())) @defer.inlineCallbacks def get_presence_list(self, observer_user, accepted=None): @@ -1051,7 +1051,7 @@ class PresenceEventSource(object): updates = yield presence.current_state_for_users(user_ids_changed) if include_offline: - defer.returnValue((updates.values(), max_token)) + defer.returnValue((list(updates.values()), max_token)) else: defer.returnValue(([ s for s in itervalues(updates) @@ -1112,7 +1112,7 @@ def handle_timeouts(user_states, is_mine_fn, syncing_user_ids, now): if new_state: changes[state.user_id] = new_state - return changes.values() + return list(changes.values()) def handle_timeout(state, is_mine, syncing_user_ids, now): diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index b5850db42f..2abd63ad05 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -455,7 +455,7 @@ class RoomContextHandler(BaseHandler): state = yield self.store.get_state_for_events( [last_event_id], None ) - results["state"] = state[last_event_id].values() + results["state"] = list(state[last_event_id].values()) results["start"] = now_token.copy_and_replace( "room_key", results["start"] diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 5757bb7f8a..fc507cef36 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -15,6 +15,7 @@ from twisted.internet import defer +from six import iteritems from six.moves import range from ._base import BaseHandler @@ -307,7 +308,7 @@ class RoomListHandler(BaseHandler): ) event_map = yield self.store.get_events([ - event_id for key, event_id in current_state_ids.iteritems() + event_id for key, event_id in iteritems(current_state_ids) if key[0] in ( EventTypes.JoinRules, EventTypes.Name, diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 9772ed1a0e..1eca26aa1e 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -348,7 +348,7 @@ class SearchHandler(BaseHandler): rooms = set(e.room_id for e in allowed_events) for room_id in rooms: state = yield self.state_handler.get_current_state(room_id) - state_results[room_id] = state.values() + state_results[room_id] = list(state.values()) state_results.values() diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 8377650b68..51ec727df0 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -541,11 +541,11 @@ class SyncHandler(object): state = {} if state_ids: - state = yield self.store.get_events(state_ids.values()) + state = yield self.store.get_events(list(state_ids.values())) defer.returnValue({ (e.type, e.state_key): e - for e in sync_config.filter_collection.filter_room_state(state.values()) + for e in sync_config.filter_collection.filter_room_state(list(state.values())) }) 
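# Similarly, py3 dict views such as .values() are live, lazy views; code that
# indexes them or mutates the dict mid-iteration must materialise them first,
# hence the list(state.values()) above (standalone sketch, toy data):
state = {"m.room.name": "ev1", "m.room.topic": "ev2"}
values = state.values()
state["m.room.avatar"] = "ev3"
print(len(values))      # 3 on py3 -- the view tracks the dict
print(list(values))     # an explicit snapshot, safe to index or reuse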
@defer.inlineCallbacks @@ -894,7 +894,7 @@ class SyncHandler(object): presence.extend(states) # Deduplicate the presence entries so that there's at most one per user - presence = {p.user_id: p for p in presence}.values() + presence = list({p.user_id: p for p in presence}.values()) presence = sync_config.filter_collection.filter_presence( presence diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 7a18afe5f9..a8ae7bcd6c 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -39,7 +39,7 @@ def list_with_base_rules(rawrules): rawrules = [r for r in rawrules if r['priority_class'] >= 0] # shove the server default rules for each kind onto the end of each - current_prio_class = PRIORITY_CLASS_INVERSE_MAP.keys()[-1] + current_prio_class = list(PRIORITY_CLASS_INVERSE_MAP)[-1] ruleslist.extend(make_base_prepend_rules( PRIORITY_CLASS_INVERSE_MAP[current_prio_class], modified_base_rules diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index b5cd9b426a..d4be800e5e 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -229,7 +229,8 @@ class Mailer(object): if room_vars['notifs'] and 'messages' in room_vars['notifs'][-1]: prev_messages = room_vars['notifs'][-1]['messages'] for message in notifvars['messages']: - pm = filter(lambda pm: pm['id'] == message['id'], prev_messages) + pm = list(filter(lambda pm: pm['id'] == message['id'], + prev_messages)) if pm: if not message["is_historical"]: pm[0]["is_historical"] = False diff --git a/synapse/push/presentable_names.py b/synapse/push/presentable_names.py index 277da3cd35..43f0c74ff3 100644 --- a/synapse/push/presentable_names.py +++ b/synapse/push/presentable_names.py @@ -113,7 +113,7 @@ def calculate_room_name(store, room_state_ids, user_id, fallback_to_members=True # so find out who is in the room that isn't the user. 
if "m.room.member" in room_state_bytype_ids: member_events = yield store.get_events( - room_state_bytype_ids["m.room.member"].values() + list(room_state_bytype_ids["m.room.member"].values()) ) all_members = [ ev for ev in member_events.values() diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py index 20fa6678ef..7c01b438cb 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py @@ -104,7 +104,7 @@ class HttpTransactionCache(object): def _cleanup(self): now = self.clock.time_msec() - for key in self.transactions.keys(): + for key in list(self.transactions): ts = self.transactions[key][1] if now > (ts + CLEANUP_PERIOD_MS): # after cleanup period del self.transactions[key] diff --git a/synapse/state.py b/synapse/state.py index b8c27c6815..216418f58d 100644 --- a/synapse/state.py +++ b/synapse/state.py @@ -132,7 +132,8 @@ class StateHandler(object): defer.returnValue(event) return - state_map = yield self.store.get_events(state.values(), get_prev_content=False) + state_map = yield self.store.get_events(list(state.values()), + get_prev_content=False) state = { key: state_map[e_id] for key, e_id in iteritems(state) if e_id in state_map } diff --git a/synapse/storage/events.py b/synapse/storage/events.py index b96104ccae..cb1082e864 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -40,6 +40,9 @@ import synapse.metrics from synapse.events import EventBase # noqa: F401 from synapse.events.snapshot import EventContext # noqa: F401 +from six.moves import range +from six import itervalues, iteritems + from prometheus_client import Counter logger = logging.getLogger(__name__) @@ -245,7 +248,7 @@ class EventsStore(EventsWorkerStore): partitioned.setdefault(event.room_id, []).append((event, ctx)) deferreds = [] - for room_id, evs_ctxs in partitioned.iteritems(): + for room_id, evs_ctxs in iteritems(partitioned): d = self._event_persist_queue.add_to_queue( room_id, evs_ctxs, backfilled=backfilled, @@ -330,7 +333,7 @@ class EventsStore(EventsWorkerStore): chunks = [ events_and_contexts[x:x + 100] - for x in xrange(0, len(events_and_contexts), 100) + for x in range(0, len(events_and_contexts), 100) ] for chunk in chunks: @@ -364,7 +367,7 @@ class EventsStore(EventsWorkerStore): (event, context) ) - for room_id, ev_ctx_rm in events_by_room.iteritems(): + for room_id, ev_ctx_rm in iteritems(events_by_room): # Work out new extremities by recursively adding and removing # the new events. 
latest_event_ids = yield self.get_latest_event_ids_in_room( @@ -459,12 +462,12 @@ class EventsStore(EventsWorkerStore): event_counter.labels(event.type, origin_type, origin_entity).inc() - for room_id, new_state in current_state_for_room.iteritems(): + for room_id, new_state in iteritems(current_state_for_room): self.get_current_state_ids.prefill( (room_id, ), new_state ) - for room_id, latest_event_ids in new_forward_extremeties.iteritems(): + for room_id, latest_event_ids in iteritems(new_forward_extremeties): self.get_latest_event_ids_in_room.prefill( (room_id,), list(latest_event_ids) ) @@ -641,20 +644,20 @@ class EventsStore(EventsWorkerStore): """ existing_state = yield self.get_current_state_ids(room_id) - existing_events = set(existing_state.itervalues()) - new_events = set(ev_id for ev_id in current_state.itervalues()) + existing_events = set(itervalues(existing_state)) + new_events = set(ev_id for ev_id in itervalues(current_state)) changed_events = existing_events ^ new_events if not changed_events: return to_delete = { - key: ev_id for key, ev_id in existing_state.iteritems() + key: ev_id for key, ev_id in iteritems(existing_state) if ev_id in changed_events } events_to_insert = (new_events - existing_events) to_insert = { - key: ev_id for key, ev_id in current_state.iteritems() + key: ev_id for key, ev_id in iteritems(current_state) if ev_id in events_to_insert } @@ -757,11 +760,11 @@ class EventsStore(EventsWorkerStore): ) def _update_current_state_txn(self, txn, state_delta_by_room, max_stream_order): - for room_id, current_state_tuple in state_delta_by_room.iteritems(): + for room_id, current_state_tuple in iteritems(state_delta_by_room): to_delete, to_insert = current_state_tuple txn.executemany( "DELETE FROM current_state_events WHERE event_id = ?", - [(ev_id,) for ev_id in to_delete.itervalues()], + [(ev_id,) for ev_id in itervalues(to_delete)], ) self._simple_insert_many_txn( @@ -774,7 +777,7 @@ class EventsStore(EventsWorkerStore): "type": key[0], "state_key": key[1], } - for key, ev_id in to_insert.iteritems() + for key, ev_id in iteritems(to_insert) ], ) @@ -793,7 +796,7 @@ class EventsStore(EventsWorkerStore): "event_id": ev_id, "prev_event_id": to_delete.get(key, None), } - for key, ev_id in state_deltas.iteritems() + for key, ev_id in iteritems(state_deltas) ] ) @@ -836,7 +839,7 @@ class EventsStore(EventsWorkerStore): def _update_forward_extremities_txn(self, txn, new_forward_extremities, max_stream_order): - for room_id, new_extrem in new_forward_extremities.iteritems(): + for room_id, new_extrem in iteritems(new_forward_extremities): self._simple_delete_txn( txn, table="event_forward_extremities", @@ -854,7 +857,7 @@ class EventsStore(EventsWorkerStore): "event_id": ev_id, "room_id": room_id, } - for room_id, new_extrem in new_forward_extremities.iteritems() + for room_id, new_extrem in iteritems(new_forward_extremities) for ev_id in new_extrem ], ) @@ -871,7 +874,7 @@ class EventsStore(EventsWorkerStore): "event_id": event_id, "stream_ordering": max_stream_order, } - for room_id, new_extrem in new_forward_extremities.iteritems() + for room_id, new_extrem in iteritems(new_forward_extremities) for event_id in new_extrem ] ) @@ -899,7 +902,7 @@ class EventsStore(EventsWorkerStore): new_events_and_contexts[event.event_id] = (event, context) else: new_events_and_contexts[event.event_id] = (event, context) - return new_events_and_contexts.values() + return list(new_events_and_contexts.values()) def _update_room_depths_txn(self, txn, events_and_contexts, backfilled): 
"""Update min_depth for each room @@ -925,7 +928,7 @@ class EventsStore(EventsWorkerStore): event.depth, depth_updates.get(event.room_id, event.depth) ) - for room_id, depth in depth_updates.iteritems(): + for room_id, depth in iteritems(depth_updates): self._update_min_depth_for_room_txn(txn, room_id, depth) def _update_outliers_txn(self, txn, events_and_contexts): @@ -1309,7 +1312,7 @@ class EventsStore(EventsWorkerStore): " WHERE e.event_id IN (%s)" ) % (",".join(["?"] * len(ev_map)),) - txn.execute(sql, ev_map.keys()) + txn.execute(sql, list(ev_map)) rows = self.cursor_to_dict(txn) for row in rows: event = ev_map[row["event_id"]] @@ -1572,7 +1575,7 @@ class EventsStore(EventsWorkerStore): chunks = [ event_ids[i:i + 100] - for i in xrange(0, len(event_ids), 100) + for i in range(0, len(event_ids), 100) ] for chunk in chunks: ev_rows = self._simple_select_many_txn( @@ -1986,7 +1989,7 @@ class EventsStore(EventsWorkerStore): logger.info("[purge] finding state groups which depend on redundant" " state groups") remaining_state_groups = [] - for i in xrange(0, len(state_rows), 100): + for i in range(0, len(state_rows), 100): chunk = [sg for sg, in state_rows[i:i + 100]] # look for state groups whose prev_state_group is one we are about # to delete @@ -2042,7 +2045,7 @@ class EventsStore(EventsWorkerStore): "state_key": key[1], "event_id": state_id, } - for key, state_id in curr_state.iteritems() + for key, state_id in iteritems(curr_state) ], ) diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py index 9e9d3c2591..f05d91cc58 100644 --- a/synapse/storage/presence.py +++ b/synapse/storage/presence.py @@ -16,6 +16,7 @@ from ._base import SQLBaseStore from synapse.api.constants import PresenceState from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList +from synapse.util import batch_iter from collections import namedtuple from twisted.internet import defer @@ -115,11 +116,7 @@ class PresenceStore(SQLBaseStore): " AND user_id IN (%s)" ) - batches = ( - presence_states[i:i + 50] - for i in xrange(0, len(presence_states), 50) - ) - for states in batches: + for states in batch_iter(presence_states, 50): args = [stream_id] args.extend(s.user_id for s in states) txn.execute( diff --git a/synapse/storage/search.py b/synapse/storage/search.py index a9c299a861..f0fa5d7631 100644 --- a/synapse/storage/search.py +++ b/synapse/storage/search.py @@ -448,7 +448,7 @@ class SearchStore(BackgroundUpdateStore): "search_msgs", self.cursor_to_dict, sql, *args ) - results = filter(lambda row: row["room_id"] in room_ids, results) + results = list(filter(lambda row: row["room_id"] in room_ids, results)) events = yield self._get_events([r["event_id"] for r in results]) @@ -603,7 +603,7 @@ class SearchStore(BackgroundUpdateStore): "search_rooms", self.cursor_to_dict, sql, *args ) - results = filter(lambda row: row["room_id"] in room_ids, results) + results = list(filter(lambda row: row["room_id"] in room_ids, results)) events = yield self._get_events([r["event_id"] for r in results]) diff --git a/synapse/storage/state.py b/synapse/storage/state.py index ffa4246031..bdee14a8eb 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -16,6 +16,9 @@ from collections import namedtuple import logging +from six import iteritems, itervalues +from six.moves import range + from twisted.internet import defer from synapse.storage.background_updates import BackgroundUpdateStore @@ -134,7 +137,7 @@ class StateGroupWorkerStore(SQLBaseStore): event_ids, ) - groups = 
set(event_to_groups.itervalues()) + groups = set(itervalues(event_to_groups)) group_to_state = yield self._get_state_for_groups(groups) defer.returnValue(group_to_state) @@ -166,18 +169,18 @@ class StateGroupWorkerStore(SQLBaseStore): state_event_map = yield self.get_events( [ - ev_id for group_ids in group_to_ids.itervalues() - for ev_id in group_ids.itervalues() + ev_id for group_ids in itervalues(group_to_ids) + for ev_id in itervalues(group_ids) ], get_prev_content=False ) defer.returnValue({ group: [ - state_event_map[v] for v in event_id_map.itervalues() + state_event_map[v] for v in itervalues(event_id_map) if v in state_event_map ] - for group, event_id_map in group_to_ids.iteritems() + for group, event_id_map in iteritems(group_to_ids) }) @defer.inlineCallbacks @@ -186,7 +189,7 @@ class StateGroupWorkerStore(SQLBaseStore): """ results = {} - chunks = [groups[i:i + 100] for i in xrange(0, len(groups), 100)] + chunks = [groups[i:i + 100] for i in range(0, len(groups), 100)] for chunk in chunks: res = yield self.runInteraction( "_get_state_groups_from_groups", @@ -347,21 +350,21 @@ class StateGroupWorkerStore(SQLBaseStore): event_ids, ) - groups = set(event_to_groups.itervalues()) + groups = set(itervalues(event_to_groups)) group_to_state = yield self._get_state_for_groups(groups, types) state_event_map = yield self.get_events( - [ev_id for sd in group_to_state.itervalues() for ev_id in sd.itervalues()], + [ev_id for sd in itervalues(group_to_state) for ev_id in itervalues(sd)], get_prev_content=False ) event_to_state = { event_id: { k: state_event_map[v] - for k, v in group_to_state[group].iteritems() + for k, v in iteritems(group_to_state[group]) if v in state_event_map } - for event_id, group in event_to_groups.iteritems() + for event_id, group in iteritems(event_to_groups) } defer.returnValue({event: event_to_state[event] for event in event_ids}) @@ -384,12 +387,12 @@ class StateGroupWorkerStore(SQLBaseStore): event_ids, ) - groups = set(event_to_groups.itervalues()) + groups = set(itervalues(event_to_groups)) group_to_state = yield self._get_state_for_groups(groups, types) event_to_state = { event_id: group_to_state[group] - for event_id, group in event_to_groups.iteritems() + for event_id, group in iteritems(event_to_groups) } defer.returnValue({event: event_to_state[event] for event in event_ids}) @@ -503,7 +506,7 @@ class StateGroupWorkerStore(SQLBaseStore): got_all = is_all or not missing_types return { - k: v for k, v in state_dict_ids.iteritems() + k: v for k, v in iteritems(state_dict_ids) if include(k[0], k[1]) }, missing_types, got_all @@ -562,12 +565,12 @@ class StateGroupWorkerStore(SQLBaseStore): # Now we want to update the cache with all the things we fetched # from the database. 
- for group, group_state_dict in group_to_state_dict.iteritems(): + for group, group_state_dict in iteritems(group_to_state_dict): state_dict = results[group] state_dict.update( ((intern_string(k[0]), intern_string(k[1])), to_ascii(v)) - for k, v in group_state_dict.iteritems() + for k, v in iteritems(group_state_dict) ) self._state_group_cache.update( @@ -654,7 +657,7 @@ class StateGroupWorkerStore(SQLBaseStore): "state_key": key[1], "event_id": state_id, } - for key, state_id in delta_ids.iteritems() + for key, state_id in iteritems(delta_ids) ], ) else: @@ -669,7 +672,7 @@ class StateGroupWorkerStore(SQLBaseStore): "state_key": key[1], "event_id": state_id, } - for key, state_id in current_state_ids.iteritems() + for key, state_id in iteritems(current_state_ids) ], ) @@ -794,11 +797,11 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): "state_group": state_group_id, "event_id": event_id, } - for event_id, state_group_id in state_groups.iteritems() + for event_id, state_group_id in iteritems(state_groups) ], ) - for event_id, state_group_id in state_groups.iteritems(): + for event_id, state_group_id in iteritems(state_groups): txn.call_after( self._get_state_group_for_event.prefill, (event_id,), state_group_id @@ -826,7 +829,7 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): def reindex_txn(txn): new_last_state_group = last_state_group - for count in xrange(batch_size): + for count in range(batch_size): txn.execute( "SELECT id, room_id FROM state_groups" " WHERE ? < id AND id <= ?" @@ -884,7 +887,7 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): # of keys delta_state = { - key: value for key, value in curr_state.iteritems() + key: value for key, value in iteritems(curr_state) if prev_state.get(key, None) != value } @@ -924,7 +927,7 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): "state_key": key[1], "event_id": state_id, } - for key, state_id in delta_state.iteritems() + for key, state_id in iteritems(delta_state) ], ) diff --git a/synapse/storage/user_directory.py b/synapse/storage/user_directory.py index d6e289ffbe..275c299998 100644 --- a/synapse/storage/user_directory.py +++ b/synapse/storage/user_directory.py @@ -22,6 +22,8 @@ from synapse.api.constants import EventTypes, JoinRules from synapse.storage.engines import PostgresEngine, Sqlite3Engine from synapse.types import get_domain_from_id, get_localpart_from_id +from six import iteritems + import re import logging @@ -100,7 +102,7 @@ class UserDirectoryStore(SQLBaseStore): user_id, get_localpart_from_id(user_id), get_domain_from_id(user_id), profile.display_name, ) - for user_id, profile in users_with_profile.iteritems() + for user_id, profile in iteritems(users_with_profile) ) elif isinstance(self.database_engine, Sqlite3Engine): sql = """ @@ -112,7 +114,7 @@ class UserDirectoryStore(SQLBaseStore): user_id, "%s %s" % (user_id, p.display_name,) if p.display_name else user_id ) - for user_id, p in users_with_profile.iteritems() + for user_id, p in iteritems(users_with_profile) ) else: # This should be unreachable. 
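# The iteritems()/itervalues() rewrites in this commit go through six so the
# same code stays lazy on py2 and py3 alike (standalone sketch, toy data):
from six import iteritems, itervalues

counts = {"join": 3, "leave": 1}
print(sum(itervalues(counts)))                   # 4
print({k: v * 2 for k, v in iteritems(counts)})  # {'join': 6, 'leave': 2} (order may vary)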
@@ -130,7 +132,7 @@ class UserDirectoryStore(SQLBaseStore): "display_name": profile.display_name, "avatar_url": profile.avatar_url, } - for user_id, profile in users_with_profile.iteritems() + for user_id, profile in iteritems(users_with_profile) ] ) for user_id in users_with_profile: diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index f4e2c30088..fc1874b65b 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -569,7 +569,7 @@ class CacheListDescriptor(_CacheDescriptorBase): return results return logcontext.make_deferred_yieldable(defer.gatherResults( - cached_defers.values(), + list(cached_defers.values()), consumeErrors=True, ).addCallback(update_results_dict).addErrback( unwrapFirstError diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py index fcc341a6b7..dd4c9e6067 100644 --- a/synapse/util/caches/treecache.py +++ b/synapse/util/caches/treecache.py @@ -1,3 +1,5 @@ +from six import itervalues + SENTINEL = object() @@ -49,7 +51,7 @@ class TreeCache(object): if popped is SENTINEL: return default - node_and_keys = zip(nodes, key) + node_and_keys = list(zip(nodes, key)) node_and_keys.reverse() node_and_keys.append((self.root, None)) @@ -76,7 +78,7 @@ def iterate_tree_cache_entry(d): can contain dicts. """ if isinstance(d, dict): - for value_d in d.itervalues(): + for value_d in itervalues(d): for value in iterate_tree_cache_entry(value_d): yield value else: -- cgit 1.5.1 From 042eedfa2b56caef0d4873583e768ca3664a881f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 21 Feb 2018 20:49:55 +0000 Subject: Add hacky cache factor override system --- synapse/app/synctl.py | 4 ++++ synapse/storage/state.py | 4 ++-- synapse/util/caches/__init__.py | 10 ++++++++++ synapse/util/caches/descriptors.py | 4 ++-- 4 files changed, 18 insertions(+), 4 deletions(-) (limited to 'synapse/util') diff --git a/synapse/app/synctl.py b/synapse/app/synctl.py index 712dfa870e..56ae086128 100755 --- a/synapse/app/synctl.py +++ b/synapse/app/synctl.py @@ -171,6 +171,10 @@ def main(): if cache_factor: os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor) + cache_factors = config.get("synctl_cache_factors", {}) + for cache_name, factor in cache_factors.iteritems(): + os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor) + worker_configfiles = [] if options.worker: start_stop_synapse = False diff --git a/synapse/storage/state.py b/synapse/storage/state.py index bdee14a8eb..c11bc52177 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -23,7 +23,7 @@ from twisted.internet import defer from synapse.storage.background_updates import BackgroundUpdateStore from synapse.storage.engines import PostgresEngine -from synapse.util.caches import intern_string, CACHE_SIZE_FACTOR +from synapse.util.caches import intern_string, get_cache_factor_for from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.dictionary_cache import DictionaryCache from synapse.util.stringutils import to_ascii @@ -57,7 +57,7 @@ class StateGroupWorkerStore(SQLBaseStore): super(StateGroupWorkerStore, self).__init__(db_conn, hs) self._state_group_cache = DictionaryCache( - "*stateGroupCache*", 100000 * CACHE_SIZE_FACTOR + "*stateGroupCache*", 500000 * get_cache_factor_for("stateGroupCache") ) @cached(max_entries=100000, iterable=True) diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 183faf75a1..900575eb3c 100644 --- 
a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -22,6 +22,16 @@ import six CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.5)) + +def get_cache_factor_for(cache_name): + env_var = "SYNAPSE_CACHE_FACTOR_" + cache_name.upper() + factor = os.environ.get(env_var) + if factor: + return float(factor) + + return CACHE_SIZE_FACTOR + + caches_by_name = {} collectors_by_name = {} diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index fc1874b65b..65a1042de1 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -17,7 +17,7 @@ import logging from synapse.util.async import ObservableDeferred from synapse.util import unwrapFirstError, logcontext -from synapse.util.caches import CACHE_SIZE_FACTOR +from synapse.util.caches import get_cache_factor_for from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry from synapse.util.stringutils import to_ascii @@ -313,7 +313,7 @@ class CacheDescriptor(_CacheDescriptorBase): orig, num_args=num_args, inlineCallbacks=inlineCallbacks, cache_context=cache_context) - max_entries = int(max_entries * CACHE_SIZE_FACTOR) + max_entries = int(max_entries * get_cache_factor_for(orig.__name__)) self.max_entries = max_entries self.tree = tree -- cgit 1.5.1 From f7869f8f8b5a9817fde250c8c9dd44869e50a796 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 6 Jun 2018 00:13:57 +1000 Subject: Port to sortedcontainers (with tests!) (#3332) --- .gitignore | 1 + setup.cfg | 3 +- synapse/federation/send_queue.py | 14 +- synapse/python_dependencies.py | 3 +- synapse/util/caches/stream_change_cache.py | 57 +++++---- tests/util/test_stream_change_cache.py | 198 +++++++++++++++++++++++++++++ 6 files changed, 241 insertions(+), 35 deletions(-) create mode 100644 tests/util/test_stream_change_cache.py (limited to 'synapse/util') diff --git a/.gitignore b/.gitignore index 7940158c5b..9f42a7568f 100644 --- a/.gitignore +++ b/.gitignore @@ -43,6 +43,7 @@ media_store/ build/ venv/ +venv*/ localhost-800*/ static/client/register/register_config.js diff --git a/setup.cfg b/setup.cfg index da8eafbb39..fa6f2d1ce4 100644 --- a/setup.cfg +++ b/setup.cfg @@ -17,4 +17,5 @@ ignore = [flake8] max-line-length = 90 # W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it. -ignore = W503 +# E203 is contrary to PEP8. 
+ignore = W503,E203 diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 9f1142b5a9..0f4aea95a3 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -35,7 +35,7 @@ from synapse.storage.presence import UserPresenceState from synapse.util.metrics import Measure from synapse.metrics import LaterGauge -from blist import sorteddict +from sortedcontainers import SortedDict from collections import namedtuple import logging @@ -55,19 +55,19 @@ class FederationRemoteSendQueue(object): self.is_mine_id = hs.is_mine_id self.presence_map = {} # Pending presence map user_id -> UserPresenceState - self.presence_changed = sorteddict() # Stream position -> user_id + self.presence_changed = SortedDict() # Stream position -> user_id self.keyed_edu = {} # (destination, key) -> EDU - self.keyed_edu_changed = sorteddict() # stream position -> (destination, key) + self.keyed_edu_changed = SortedDict() # stream position -> (destination, key) - self.edus = sorteddict() # stream position -> Edu + self.edus = SortedDict() # stream position -> Edu - self.failures = sorteddict() # stream position -> (destination, Failure) + self.failures = SortedDict() # stream position -> (destination, Failure) - self.device_messages = sorteddict() # stream position -> destination + self.device_messages = SortedDict() # stream position -> destination self.pos = 1 - self.pos_time = sorteddict() + self.pos_time = SortedDict() # EVERYTHING IS SAD. In particular, python only makes new scopes when # we make a new function, so we need to make a new function so the inner diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 478c497722..001c798fe3 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -50,7 +50,7 @@ REQUIREMENTS = { "bcrypt": ["bcrypt>=3.1.0"], "pillow": ["PIL"], "pydenticon": ["pydenticon"], - "blist": ["blist"], + "sortedcontainers": ["sortedcontainers"], "pysaml2>=3.0.0": ["saml2>=3.0.0"], "pymacaroons-pynacl": ["pymacaroons"], "msgpack-python>=0.3.0": ["msgpack"], @@ -58,6 +58,7 @@ REQUIREMENTS = { "six": ["six"], "prometheus_client": ["prometheus_client"], } + CONDITIONAL_REQUIREMENTS = { "web_client": { "matrix_angular_sdk>=0.6.8": ["syweb>=0.6.8"], diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index a7fe0397fa..817118e30f 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -13,10 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.util.caches import register_cache, CACHE_SIZE_FACTOR +from synapse.util import caches -from blist import sorteddict +from sortedcontainers import SortedDict import logging @@ -32,16 +32,18 @@ class StreamChangeCache(object): entities that may have changed since that position. If position key is too old then the cache will simply return all given entities. 
""" - def __init__(self, name, current_stream_pos, max_size=10000, prefilled_cache={}): - self._max_size = int(max_size * CACHE_SIZE_FACTOR) + + def __init__(self, name, current_stream_pos, max_size=10000, prefilled_cache=None): + self._max_size = int(max_size * caches.CACHE_SIZE_FACTOR) self._entity_to_key = {} - self._cache = sorteddict() + self._cache = SortedDict() self._earliest_known_stream_pos = current_stream_pos self.name = name - self.metrics = register_cache("cache", self.name, self._cache) + self.metrics = caches.register_cache("cache", self.name, self._cache) - for entity, stream_pos in prefilled_cache.items(): - self.entity_has_changed(entity, stream_pos) + if prefilled_cache: + for entity, stream_pos in prefilled_cache.items(): + self.entity_has_changed(entity, stream_pos) def has_entity_changed(self, entity, stream_pos): """Returns True if the entity may have been updated since stream_pos @@ -65,22 +67,25 @@ class StreamChangeCache(object): return False def get_entities_changed(self, entities, stream_pos): - """Returns subset of entities that have had new things since the - given position. If the position is too old it will just return the given list. + """ + Returns subset of entities that have had new things since the given + position. Entities unknown to the cache will be returned. If the + position is too old it will just return the given list. """ assert type(stream_pos) is int if stream_pos >= self._earliest_known_stream_pos: - keys = self._cache.keys() - i = keys.bisect_right(stream_pos) + not_known_entities = set(entities) - set(self._entity_to_key) - result = set( - self._cache[k] for k in keys[i:] - ).intersection(entities) + result = ( + set(self._cache.values()[self._cache.bisect_right(stream_pos) :]) + .intersection(entities) + .union(not_known_entities) + ) self.metrics.inc_hits() else: - result = entities + result = set(entities) self.metrics.inc_misses() return result @@ -90,12 +95,13 @@ class StreamChangeCache(object): """ assert type(stream_pos) is int + if not self._cache: + # If we have no cache, nothing can have changed. 
+ return False + if stream_pos >= self._earliest_known_stream_pos: self.metrics.inc_hits() - keys = self._cache.keys() - i = keys.bisect_right(stream_pos) - - return i < len(keys) + return self._cache.bisect_right(stream_pos) < len(self._cache) else: self.metrics.inc_misses() return True @@ -107,10 +113,7 @@ class StreamChangeCache(object): assert type(stream_pos) is int if stream_pos >= self._earliest_known_stream_pos: - keys = self._cache.keys() - i = keys.bisect_right(stream_pos) - - return [self._cache[k] for k in keys[i:]] + return self._cache.values()[self._cache.bisect_right(stream_pos) :] else: return None @@ -129,8 +132,10 @@ class StreamChangeCache(object): self._entity_to_key[entity] = stream_pos while len(self._cache) > self._max_size: - k, r = self._cache.popitem() - self._earliest_known_stream_pos = max(k, self._earliest_known_stream_pos) + k, r = self._cache.popitem(0) + self._earliest_known_stream_pos = max( + k, self._earliest_known_stream_pos, + ) self._entity_to_key.pop(r, None) def get_max_pos_of_last_change(self, entity): diff --git a/tests/util/test_stream_change_cache.py b/tests/util/test_stream_change_cache.py new file mode 100644 index 0000000000..67ece166c7 --- /dev/null +++ b/tests/util/test_stream_change_cache.py @@ -0,0 +1,198 @@ +from tests import unittest +from mock import patch + +from synapse.util.caches.stream_change_cache import StreamChangeCache + + +class StreamChangeCacheTests(unittest.TestCase): + """ + Tests for StreamChangeCache. + """ + + def test_prefilled_cache(self): + """ + Providing a prefilled cache to StreamChangeCache will result in a cache + with the prefilled-cache entered in. + """ + cache = StreamChangeCache("#test", 1, prefilled_cache={"user@foo.com": 2}) + self.assertTrue(cache.has_entity_changed("user@foo.com", 1)) + + def test_has_entity_changed(self): + """ + StreamChangeCache.entity_has_changed will mark entities as changed, and + has_entity_changed will observe the changed entities. + """ + cache = StreamChangeCache("#test", 3) + + cache.entity_has_changed("user@foo.com", 6) + cache.entity_has_changed("bar@baz.net", 7) + + # If it's been changed after that stream position, return True + self.assertTrue(cache.has_entity_changed("user@foo.com", 4)) + self.assertTrue(cache.has_entity_changed("bar@baz.net", 4)) + + # If it's been changed at that stream position, return False + self.assertFalse(cache.has_entity_changed("user@foo.com", 6)) + + # If there's no changes after that stream position, return False + self.assertFalse(cache.has_entity_changed("user@foo.com", 7)) + + # If the entity does not exist, return False. + self.assertFalse(cache.has_entity_changed("not@here.website", 7)) + + # If we request before the stream cache's earliest known position, + # return True, whether it's a known entity or not. + self.assertTrue(cache.has_entity_changed("user@foo.com", 0)) + self.assertTrue(cache.has_entity_changed("not@here.website", 0)) + + @patch("synapse.util.caches.CACHE_SIZE_FACTOR", 1.0) + def test_has_entity_changed_pops_off_start(self): + """ + StreamChangeCache.entity_has_changed will respect the max size and + purge the oldest items upon reaching that max size. 
+ """ + cache = StreamChangeCache("#test", 1, max_size=2) + + cache.entity_has_changed("user@foo.com", 2) + cache.entity_has_changed("bar@baz.net", 3) + cache.entity_has_changed("user@elsewhere.org", 4) + + # The cache is at the max size, 2 + self.assertEqual(len(cache._cache), 2) + + # The oldest item has been popped off + self.assertTrue("user@foo.com" not in cache._entity_to_key) + + # If we update an existing entity, it keeps the two existing entities + cache.entity_has_changed("bar@baz.net", 5) + self.assertEqual( + set(["bar@baz.net", "user@elsewhere.org"]), set(cache._entity_to_key) + ) + + def test_get_all_entities_changed(self): + """ + StreamChangeCache.get_all_entities_changed will return all changed + entities since the given position. If the position is before the start + of the known stream, it returns None instead. + """ + cache = StreamChangeCache("#test", 1) + + cache.entity_has_changed("user@foo.com", 2) + cache.entity_has_changed("bar@baz.net", 3) + cache.entity_has_changed("user@elsewhere.org", 4) + + self.assertEqual( + cache.get_all_entities_changed(1), + ["user@foo.com", "bar@baz.net", "user@elsewhere.org"], + ) + self.assertEqual( + cache.get_all_entities_changed(2), ["bar@baz.net", "user@elsewhere.org"] + ) + self.assertEqual(cache.get_all_entities_changed(3), ["user@elsewhere.org"]) + self.assertEqual(cache.get_all_entities_changed(0), None) + + def test_has_any_entity_changed(self): + """ + StreamChangeCache.has_any_entity_changed will return True if any + entities have been changed since the provided stream position, and + False if they have not. If the cache has entries and the provided + stream position is before it, it will return True, otherwise False if + the cache has no entries. + """ + cache = StreamChangeCache("#test", 1) + + # With no entities, it returns False for the past, present, and future. + self.assertFalse(cache.has_any_entity_changed(0)) + self.assertFalse(cache.has_any_entity_changed(1)) + self.assertFalse(cache.has_any_entity_changed(2)) + + # We add an entity + cache.entity_has_changed("user@foo.com", 2) + + # With an entity, it returns True for the past, the stream start + # position, and False for the stream position the entity was changed + # on and ones after it. + self.assertTrue(cache.has_any_entity_changed(0)) + self.assertTrue(cache.has_any_entity_changed(1)) + self.assertFalse(cache.has_any_entity_changed(2)) + self.assertFalse(cache.has_any_entity_changed(3)) + + def test_get_entities_changed(self): + """ + StreamChangeCache.get_entities_changed will return the entities in the + given list that have changed since the provided stream ID. If the + stream position is earlier than the earliest known position, it will + return all of the entities queried for. + """ + cache = StreamChangeCache("#test", 1) + + cache.entity_has_changed("user@foo.com", 2) + cache.entity_has_changed("bar@baz.net", 3) + cache.entity_has_changed("user@elsewhere.org", 4) + + # Query all the entries, but mid-way through the stream. We should only + # get the ones after that point. + self.assertEqual( + cache.get_entities_changed( + ["user@foo.com", "bar@baz.net", "user@elsewhere.org"], stream_pos=2 + ), + set(["bar@baz.net", "user@elsewhere.org"]), + ) + + # Query all the entries mid-way through the stream, but include one + # that doesn't exist in it. We should get back the one that doesn't + # exist, too. 
+ self.assertEqual( + cache.get_entities_changed( + [ + "user@foo.com", + "bar@baz.net", + "user@elsewhere.org", + "not@here.website", + ], + stream_pos=2, + ), + set(["bar@baz.net", "user@elsewhere.org", "not@here.website"]), + ) + + # Query all the entries, but before the first known point. We will get + # all the entries we queried for, including ones that don't exist. + self.assertEqual( + cache.get_entities_changed( + [ + "user@foo.com", + "bar@baz.net", + "user@elsewhere.org", + "not@here.website", + ], + stream_pos=0, + ), + set( + [ + "user@foo.com", + "bar@baz.net", + "user@elsewhere.org", + "not@here.website", + ] + ), + ) + + def test_max_pos(self): + """ + StreamChangeCache.get_max_pos_of_last_change will return the most + recent point where the entity could have changed. If the entity is not + known, the stream start is provided instead. + """ + cache = StreamChangeCache("#test", 1) + + cache.entity_has_changed("user@foo.com", 2) + cache.entity_has_changed("bar@baz.net", 3) + cache.entity_has_changed("user@elsewhere.org", 4) + + # Known entities will return the point where they were changed. + self.assertEqual(cache.get_max_pos_of_last_change("user@foo.com"), 2) + self.assertEqual(cache.get_max_pos_of_last_change("bar@baz.net"), 3) + self.assertEqual(cache.get_max_pos_of_last_change("user@elsewhere.org"), 4) + + # Unknown entities will return the stream start position. + self.assertEqual(cache.get_max_pos_of_last_change("not@here.website"), 1) -- cgit 1.5.1 From a61738b316db70a4184d5c355696e0a039e7867f Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 14 Jun 2018 18:27:37 +1000 Subject: Remove run_on_reactor (#3395) --- synapse/federation/transaction_queue.py | 4 ---- synapse/handlers/auth.py | 8 ++------ synapse/handlers/federation.py | 6 +----- synapse/handlers/identity.py | 8 -------- synapse/handlers/message.py | 4 +--- synapse/handlers/register.py | 5 +---- synapse/push/pusherpool.py | 3 --- synapse/rest/client/v1/register.py | 7 ------- synapse/rest/client/v2_alpha/account.py | 7 ------- synapse/rest/client/v2_alpha/register.py | 3 --- synapse/util/async.py | 10 +--------- tests/test_distributor.py | 2 -- tests/util/caches/test_descriptors.py | 2 -- 13 files changed, 6 insertions(+), 63 deletions(-) (limited to 'synapse/util') diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index f0aeb5a0d3..bcbce7f6eb 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -21,7 +21,6 @@ from .units import Transaction, Edu from synapse.api.errors import HttpResponseException, FederationDeniedError from synapse.util import logcontext, PreserveLoggingContext -from synapse.util.async import run_on_reactor from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter from synapse.util.metrics import measure_func from synapse.handlers.presence import format_user_presence_state, get_interested_remotes @@ -451,9 +450,6 @@ class TransactionQueue(object): # hence why we throw the result away. yield get_retry_limiter(destination, self.clock, self.store) - # XXX: what's this for? - yield run_on_reactor() - pending_pdus = [] while True: device_message_edus, device_stream_id, dev_list_id = ( diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 912136534d..dabc744890 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. + from twisted.internet import defer, threads from ._base import BaseHandler @@ -23,7 +24,6 @@ from synapse.api.errors import ( ) from synapse.module_api import ModuleApi from synapse.types import UserID -from synapse.util.async import run_on_reactor from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.logcontext import make_deferred_yieldable @@ -423,15 +423,11 @@ class AuthHandler(BaseHandler): def _check_msisdn(self, authdict, _): return self._check_threepid('msisdn', authdict) - @defer.inlineCallbacks def _check_dummy_auth(self, authdict, _): - yield run_on_reactor() - defer.returnValue(True) + return defer.succeed(True) @defer.inlineCallbacks def _check_threepid(self, medium, authdict): - yield run_on_reactor() - if 'threepid_creds' not in authdict: raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 495ac4c648..af94bf33bc 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -39,7 +39,7 @@ from synapse.events.validator import EventValidator from synapse.util import unwrapFirstError, logcontext from synapse.util.metrics import measure_func from synapse.util.logutils import log_function -from synapse.util.async import run_on_reactor, Linearizer +from synapse.util.async import Linearizer from synapse.util.frozenutils import unfreeze from synapse.crypto.event_signing import ( compute_event_signature, add_hashes_and_signatures, @@ -1381,8 +1381,6 @@ class FederationHandler(BaseHandler): def get_state_for_pdu(self, room_id, event_id): """Returns the state at the event. i.e. not including said event. """ - yield run_on_reactor() - state_groups = yield self.store.get_state_groups( room_id, [event_id] ) @@ -1425,8 +1423,6 @@ class FederationHandler(BaseHandler): def get_state_ids_for_pdu(self, room_id, event_id): """Returns the state at the event. i.e. not including said event. 
""" - yield run_on_reactor() - state_groups = yield self.store.get_state_groups_ids( room_id, [event_id] ) diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 529400955d..f00dfe1d3e 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -27,7 +27,6 @@ from synapse.api.errors import ( MatrixCodeMessageException, CodeMessageException ) from ._base import BaseHandler -from synapse.util.async import run_on_reactor from synapse.api.errors import SynapseError, Codes logger = logging.getLogger(__name__) @@ -62,8 +61,6 @@ class IdentityHandler(BaseHandler): @defer.inlineCallbacks def threepid_from_creds(self, creds): - yield run_on_reactor() - if 'id_server' in creds: id_server = creds['id_server'] elif 'idServer' in creds: @@ -106,7 +103,6 @@ class IdentityHandler(BaseHandler): @defer.inlineCallbacks def bind_threepid(self, creds, mxid): - yield run_on_reactor() logger.debug("binding threepid %r to %s", creds, mxid) data = None @@ -188,8 +184,6 @@ class IdentityHandler(BaseHandler): @defer.inlineCallbacks def requestEmailToken(self, id_server, email, client_secret, send_attempt, **kwargs): - yield run_on_reactor() - if not self._should_trust_id_server(id_server): raise SynapseError( 400, "Untrusted ID server '%s'" % id_server, @@ -224,8 +218,6 @@ class IdentityHandler(BaseHandler): self, id_server, country, phone_number, client_secret, send_attempt, **kwargs ): - yield run_on_reactor() - if not self._should_trust_id_server(id_server): raise SynapseError( 400, "Untrusted ID server '%s'" % id_server, diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 1cb81b6cf8..18dcc6d196 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -36,7 +36,7 @@ from synapse.events.validator import EventValidator from synapse.types import ( UserID, RoomAlias, RoomStreamToken, ) -from synapse.util.async import run_on_reactor, ReadWriteLock, Limiter +from synapse.util.async import ReadWriteLock, Limiter from synapse.util.logcontext import run_in_background from synapse.util.metrics import measure_func from synapse.util.frozenutils import frozendict_json_encoder @@ -959,9 +959,7 @@ class EventCreationHandler(object): event_stream_id, max_stream_id ) - @defer.inlineCallbacks def _notify(): - yield run_on_reactor() try: self.notifier.on_new_room_event( event, event_stream_id, max_stream_id, diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 7e52adda3c..e76ef5426d 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -24,7 +24,7 @@ from synapse.api.errors import ( from synapse.http.client import CaptchaServerHttpClient from synapse import types from synapse.types import UserID, create_requester, RoomID, RoomAlias -from synapse.util.async import run_on_reactor, Linearizer +from synapse.util.async import Linearizer from synapse.util.threepids import check_3pid_allowed from ._base import BaseHandler @@ -139,7 +139,6 @@ class RegistrationHandler(BaseHandler): Raises: RegistrationError if there was a problem registering. """ - yield run_on_reactor() password_hash = None if password: password_hash = yield self.auth_handler().hash(password) @@ -431,8 +430,6 @@ class RegistrationHandler(BaseHandler): Raises: RegistrationError if there was a problem registering. 
""" - yield run_on_reactor() - if localpart is None: raise SynapseError(400, "Request must include user id") diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 750d11ca38..36bb5bbc65 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -19,7 +19,6 @@ import logging from twisted.internet import defer from synapse.push.pusher import PusherFactory -from synapse.util.async import run_on_reactor from synapse.util.logcontext import make_deferred_yieldable, run_in_background logger = logging.getLogger(__name__) @@ -125,7 +124,6 @@ class PusherPool: @defer.inlineCallbacks def on_new_notifications(self, min_stream_id, max_stream_id): - yield run_on_reactor() try: users_affected = yield self.store.get_push_action_users_in_range( min_stream_id, max_stream_id @@ -151,7 +149,6 @@ class PusherPool: @defer.inlineCallbacks def on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids): - yield run_on_reactor() try: # Need to subtract 1 from the minimum because the lower bound here # is not inclusive diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py index 9b3022e0b0..c10320dedf 100644 --- a/synapse/rest/client/v1/register.py +++ b/synapse/rest/client/v1/register.py @@ -24,8 +24,6 @@ import synapse.util.stringutils as stringutils from synapse.http.servlet import parse_json_object_from_request from synapse.types import create_requester -from synapse.util.async import run_on_reactor - from hashlib import sha1 import hmac import logging @@ -272,7 +270,6 @@ class RegisterRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def _do_password(self, request, register_json, session): - yield run_on_reactor() if (self.hs.config.enable_registration_captcha and not session[LoginType.RECAPTCHA]): # captcha should've been done by this stage! 
@@ -333,8 +330,6 @@ class RegisterRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def _do_shared_secret(self, request, register_json, session): - yield run_on_reactor() - if not isinstance(register_json.get("mac", None), string_types): raise SynapseError(400, "Expected mac.") if not isinstance(register_json.get("user", None), string_types): @@ -423,8 +418,6 @@ class CreateUserRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def _do_create(self, requester, user_json): - yield run_on_reactor() - if "localpart" not in user_json: raise SynapseError(400, "Expected 'localpart' key.") diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 0291fba9e7..e1281cfbb6 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -24,7 +24,6 @@ from synapse.http.servlet import ( RestServlet, assert_params_in_request, parse_json_object_from_request, ) -from synapse.util.async import run_on_reactor from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.threepids import check_3pid_allowed from ._base import client_v2_patterns, interactive_auth_handler @@ -300,8 +299,6 @@ class ThreepidRestServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request): - yield run_on_reactor() - requester = yield self.auth.get_user_by_req(request) threepids = yield self.datastore.user_get_threepids( @@ -312,8 +309,6 @@ class ThreepidRestServlet(RestServlet): @defer.inlineCallbacks def on_POST(self, request): - yield run_on_reactor() - body = parse_json_object_from_request(request) threePidCreds = body.get('threePidCreds') @@ -365,8 +360,6 @@ class ThreepidDeleteRestServlet(RestServlet): @defer.inlineCallbacks def on_POST(self, request): - yield run_on_reactor() - body = parse_json_object_from_request(request) required = ['medium', 'address'] diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 5cab00aea9..97e7c0f7c6 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -32,7 +32,6 @@ from ._base import client_v2_patterns, interactive_auth_handler import logging import hmac from hashlib import sha1 -from synapse.util.async import run_on_reactor from synapse.util.ratelimitutils import FederationRateLimiter from six import string_types @@ -191,8 +190,6 @@ class RegisterRestServlet(RestServlet): @interactive_auth_handler @defer.inlineCallbacks def on_POST(self, request): - yield run_on_reactor() - body = parse_json_object_from_request(request) kind = "user" diff --git a/synapse/util/async.py b/synapse/util/async.py index 9dd4e6b5bc..b8e57efc54 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. - from twisted.internet import defer, reactor from twisted.internet.defer import CancelledError from twisted.python import failure @@ -41,13 +40,6 @@ def sleep(seconds): defer.returnValue(res) -def run_on_reactor(): - """ This will cause the rest of the function to be invoked upon the next - iteration of the main loop - """ - return sleep(0) - - class ObservableDeferred(object): """Wraps a deferred object so that we can add observer deferreds. 
These observer deferreds do not affect the callback chain of the original @@ -227,7 +219,7 @@ class Linearizer(object): # the context manager, but it needs to happen while we hold the # lock, and the context manager's exit code must be synchronous, # so actually this is the only sensible place. - yield run_on_reactor() + yield sleep(0) else: logger.info("Acquired uncontended linearizer lock %r for key %r", diff --git a/tests/test_distributor.py b/tests/test_distributor.py index 010aeaee7e..c066381698 100644 --- a/tests/test_distributor.py +++ b/tests/test_distributor.py @@ -19,7 +19,6 @@ from twisted.internet import defer from mock import Mock, patch from synapse.util.distributor import Distributor -from synapse.util.async import run_on_reactor class DistributorTestCase(unittest.TestCase): @@ -95,7 +94,6 @@ class DistributorTestCase(unittest.TestCase): @defer.inlineCallbacks def observer(): - yield run_on_reactor() raise MyException("Oopsie") self.dist.observe("whail", observer) diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 2516fe40f4..24754591df 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -18,7 +18,6 @@ import logging import mock from synapse.api.errors import SynapseError -from synapse.util import async from synapse.util import logcontext from twisted.internet import defer from synapse.util.caches import descriptors @@ -195,7 +194,6 @@ class DescriptorTestCase(unittest.TestCase): def fn(self, arg1): @defer.inlineCallbacks def inner_fn(): - yield async.run_on_reactor() raise SynapseError(400, "blah") return inner_fn() -- cgit 1.5.1 From b088aafcae71fa61433f92ce0b139802a9cbd304 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 21 Jun 2018 06:15:03 +0100 Subject: Log number of events fetched from DB When we finish processing a request, log the number of events we fetched from the database to handle it. [I'm trying to figure out which requests are responsible for large amounts of event cache churn. It may turn out to be more helpful to add counts to the prometheus per-request/block metrics, but that is an extension to this code anyway.] 
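
The shape of the change: every request runs under a LoggingContext, the event-fetching path bumps a counter on whichever context is current, and the access log prints the total. A condensed sketch of the pattern (the helper name here is illustrative, not from the patch):

    class LoggingContext(object):
        def __init__(self):
            # Events this request has had to fetch from the database.
            self.evt_db_fetch_count = 0

    def note_db_fetch(context, missing_event_ids):
        # Called when events were absent from the cache and had to be
        # read from the DB; the count later appears in the access log
        # as the "[%d dbevts]" field.
        context.evt_db_fetch_count += len(missing_event_ids)
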
--- synapse/http/site.py | 5 ++++- synapse/storage/events_worker.py | 4 ++++ synapse/util/logcontext.py | 4 ++++ 3 files changed, 12 insertions(+), 1 deletion(-) (limited to 'synapse/util') diff --git a/synapse/http/site.py b/synapse/http/site.py index 2664006f8c..74a752d6cf 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -99,16 +99,18 @@ class SynapseRequest(Request): db_txn_count = context.db_txn_count db_txn_duration_sec = context.db_txn_duration_sec db_sched_duration_sec = context.db_sched_duration_sec + evt_db_fetch_count = context.evt_db_fetch_count except Exception: ru_utime, ru_stime = (0, 0) db_txn_count, db_txn_duration_sec = (0, 0) + evt_db_fetch_count = 0 end_time = time.time() self.site.access_logger.info( "%s - %s - {%s}" " Processed request: %.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)" - " %sB %s \"%s %s %s\" \"%s\"", + " %sB %s \"%s %s %s\" \"%s\" [%d dbevts]", self.getClientIP(), self.site.site_tag, self.authenticated_entity, @@ -124,6 +126,7 @@ class SynapseRequest(Request): self.get_redacted_uri(), self.clientproto, self.get_user_agent(), + evt_db_fetch_count, ) try: diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 32d9d00ffb..cf4efa9d12 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -21,6 +21,7 @@ from synapse.events.utils import prune_event from synapse.util.logcontext import ( PreserveLoggingContext, make_deferred_yieldable, run_in_background, + LoggingContext, ) from synapse.util.metrics import Measure from synapse.api.errors import SynapseError @@ -145,6 +146,9 @@ class EventsWorkerStore(SQLBaseStore): missing_events_ids = [e for e in event_ids if e not in event_entry_map] if missing_events_ids: + log_ctx = LoggingContext.current_context() + log_ctx.evt_db_fetch_count += len(missing_events_ids) + missing_events = yield self._enqueue_events( missing_events_ids, check_redacted=check_redacted, diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index a58c723403..2bfd4bee96 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -60,6 +60,7 @@ class LoggingContext(object): __slots__ = [ "previous_context", "name", "ru_stime", "ru_utime", "db_txn_count", "db_txn_duration_sec", "db_sched_duration_sec", + "evt_db_fetch_count", "usage_start", "main_thread", "alive", "request", "tag", @@ -109,6 +110,9 @@ class LoggingContext(object): # sec spent waiting for db txns to be scheduled self.db_sched_duration_sec = 0 + # number of events this thread has fetched from the db + self.evt_db_fetch_count = 0 + # If alive has the thread resource usage when the logcontext last # became active. 
self.usage_start = None -- cgit 1.5.1 From 77ac14b960cb8daef76062ce85fc0427749b48af Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 22 Jun 2018 09:37:10 +0100 Subject: Pass around the reactor explicitly (#3385) --- synapse/handlers/auth.py | 30 +++++++++++++++++++---------- synapse/handlers/message.py | 1 + synapse/handlers/user_directory.py | 9 ++++----- synapse/http/client.py | 6 +++--- synapse/http/matrixfederationclient.py | 5 +++-- synapse/notifier.py | 3 +++ synapse/replication/http/send_event.py | 6 +++--- synapse/rest/media/v1/media_repository.py | 3 ++- synapse/rest/media/v1/media_storage.py | 7 +++++-- synapse/server.py | 19 ++++++++++++++---- synapse/storage/background_updates.py | 3 +-- synapse/storage/client_ips.py | 6 ++++-- synapse/storage/event_push_actions.py | 3 +-- synapse/storage/events_worker.py | 6 +++--- synapse/util/__init__.py | 32 +++++++++++++++++++------------ synapse/util/async.py | 25 +++++++++++------------- synapse/util/file_consumer.py | 16 +++++++++++----- synapse/util/ratelimitutils.py | 3 +-- tests/crypto/test_keyring.py | 9 +++++---- tests/rest/client/test_transactions.py | 6 +++--- tests/rest/media/v1/test_media_storage.py | 5 +++-- tests/util/test_file_consumer.py | 6 +++--- tests/util/test_linearizer.py | 7 ++++--- tests/util/test_logcontext.py | 11 ++++++----- tests/utils.py | 7 ++++++- 25 files changed, 141 insertions(+), 93 deletions(-) (limited to 'synapse/util') diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index dabc744890..a131b7f73f 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -33,6 +33,7 @@ import logging import bcrypt import pymacaroons import simplejson +import attr import synapse.util.stringutils as stringutils @@ -854,7 +855,11 @@ class AuthHandler(BaseHandler): return bcrypt.hashpw(password.encode('utf8') + self.hs.config.password_pepper, bcrypt.gensalt(self.bcrypt_rounds)) - return make_deferred_yieldable(threads.deferToThread(_do_hash)) + return make_deferred_yieldable( + threads.deferToThreadPool( + self.hs.get_reactor(), self.hs.get_reactor().getThreadPool(), _do_hash + ), + ) def validate_hash(self, password, stored_hash): """Validates that self.hash(password) == stored_hash. 
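
The hashing hunk above is the commit's theme in miniature: threads.deferToThread reaches for the one global reactor, whereas deferToThreadPool takes the reactor and thread pool as arguments, so a homeserver built on an injected test reactor stays self-contained. A small sketch of the two forms, using a stand-in blocking function:

    from twisted.internet import reactor, threads

    def blocking_work():
        # Placeholder for bcrypt.hashpw or any call that must stay off
        # the reactor thread.
        return "done"

    # Implicit: always dispatches via the global reactor.
    d1 = threads.deferToThread(blocking_work)

    # Explicit: the caller supplies the reactor and its pool, so tests
    # can substitute their own.
    d2 = threads.deferToThreadPool(
        reactor, reactor.getThreadPool(), blocking_work,
    )
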
@@ -874,16 +879,21 @@ class AuthHandler(BaseHandler): ) if stored_hash: - return make_deferred_yieldable(threads.deferToThread(_do_validate_hash)) + return make_deferred_yieldable( + threads.deferToThreadPool( + self.hs.get_reactor(), + self.hs.get_reactor().getThreadPool(), + _do_validate_hash, + ), + ) else: return defer.succeed(False) -class MacaroonGeneartor(object): - def __init__(self, hs): - self.clock = hs.get_clock() - self.server_name = hs.config.server_name - self.macaroon_secret_key = hs.config.macaroon_secret_key +@attr.s +class MacaroonGenerator(object): + + hs = attr.ib() def generate_access_token(self, user_id, extra_caveats=None): extra_caveats = extra_caveats or [] @@ -901,7 +911,7 @@ class MacaroonGeneartor(object): def generate_short_term_login_token(self, user_id, duration_in_ms=(2 * 60 * 1000)): macaroon = self._generate_base_macaroon(user_id) macaroon.add_first_party_caveat("type = login") - now = self.clock.time_msec() + now = self.hs.get_clock().time_msec() expiry = now + duration_in_ms macaroon.add_first_party_caveat("time < %d" % (expiry,)) return macaroon.serialize() @@ -913,9 +923,9 @@ class MacaroonGeneartor(object): def _generate_base_macaroon(self, user_id): macaroon = pymacaroons.Macaroon( - location=self.server_name, + location=self.hs.config.server_name, identifier="key", - key=self.macaroon_secret_key) + key=self.hs.config.macaroon_secret_key) macaroon.add_first_party_caveat("gen = 1") macaroon.add_first_party_caveat("user_id = %s" % (user_id,)) return macaroon diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 18dcc6d196..7b9946ab91 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -806,6 +806,7 @@ class EventCreationHandler(object): # If we're a worker we need to hit out to the master. if self.config.worker_app: yield send_event_to_master( + self.hs.get_clock(), self.http_client, host=self.config.worker_replication_host, port=self.config.worker_replication_http_port, diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index a39f0f7343..7e4a114d4f 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -19,7 +19,6 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, JoinRules, Membership from synapse.storage.roommember import ProfileInfo from synapse.util.metrics import Measure -from synapse.util.async import sleep from synapse.types import get_localpart_from_id from six import iteritems @@ -174,7 +173,7 @@ class UserDirectoryHandler(object): logger.info("Handling room %d/%d", num_processed_rooms + 1, len(room_ids)) yield self._handle_initial_room(room_id) num_processed_rooms += 1 - yield sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.) + yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.) logger.info("Processed all rooms.") @@ -188,7 +187,7 @@ class UserDirectoryHandler(object): logger.info("Handling user %d/%d", num_processed_users + 1, len(user_ids)) yield self._handle_local_user(user_id) num_processed_users += 1 - yield sleep(self.INITIAL_USER_SLEEP_MS / 1000.) + yield self.clock.sleep(self.INITIAL_USER_SLEEP_MS / 1000.) logger.info("Processed all users") @@ -236,7 +235,7 @@ class UserDirectoryHandler(object): count = 0 for user_id in user_ids: if count % self.INITIAL_ROOM_SLEEP_COUNT == 0: - yield sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.) + yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.) 
if not self.is_mine_id(user_id): count += 1 @@ -251,7 +250,7 @@ class UserDirectoryHandler(object): continue if count % self.INITIAL_ROOM_SLEEP_COUNT == 0: - yield sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.) + yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.) count += 1 user_set = (user_id, other_user_id) diff --git a/synapse/http/client.py b/synapse/http/client.py index 8064a84c5c..46ffb41de1 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -98,8 +98,8 @@ class SimpleHttpClient(object): method, uri, *args, **kwargs ) add_timeout_to_deferred( - request_deferred, - 60, cancelled_to_request_timed_out_error, + request_deferred, 60, self.hs.get_reactor(), + cancelled_to_request_timed_out_error, ) response = yield make_deferred_yieldable(request_deferred) @@ -115,7 +115,7 @@ class SimpleHttpClient(object): "Error sending request to %s %s: %s %s", method, redact_uri(uri), type(e).__name__, e.message ) - raise e + raise @defer.inlineCallbacks def post_urlencoded_get_json(self, uri, args={}, headers=None): diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 993dc06e02..4e0399e762 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -22,7 +22,7 @@ from twisted.web._newclient import ResponseDone from synapse.http import cancelled_to_request_timed_out_error from synapse.http.endpoint import matrix_federation_endpoint import synapse.metrics -from synapse.util.async import sleep, add_timeout_to_deferred +from synapse.util.async import add_timeout_to_deferred from synapse.util import logcontext from synapse.util.logcontext import make_deferred_yieldable import synapse.util.retryutils @@ -193,6 +193,7 @@ class MatrixFederationHttpClient(object): add_timeout_to_deferred( request_deferred, timeout / 1000. 
if timeout else 60, + self.hs.get_reactor(), cancelled_to_request_timed_out_error, ) response = yield make_deferred_yieldable( @@ -234,7 +235,7 @@ class MatrixFederationHttpClient(object): delay = min(delay, 2) delay *= random.uniform(0.8, 1.4) - yield sleep(delay) + yield self.clock.sleep(delay) retries_left -= 1 else: raise diff --git a/synapse/notifier.py b/synapse/notifier.py index 6dce20a284..3c0622a294 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -161,6 +161,7 @@ class Notifier(object): self.user_to_user_stream = {} self.room_to_user_streams = {} + self.hs = hs self.event_sources = hs.get_event_sources() self.store = hs.get_datastore() self.pending_new_room_events = [] @@ -340,6 +341,7 @@ class Notifier(object): add_timeout_to_deferred( listener.deferred, (end_time - now) / 1000., + self.hs.get_reactor(), ) with PreserveLoggingContext(): yield listener.deferred @@ -561,6 +563,7 @@ class Notifier(object): add_timeout_to_deferred( listener.deferred.addTimeout, (end_time - now) / 1000., + self.hs.get_reactor(), ) try: with PreserveLoggingContext(): diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py index a9baa2c1c3..f080f96cc1 100644 --- a/synapse/replication/http/send_event.py +++ b/synapse/replication/http/send_event.py @@ -21,7 +21,6 @@ from synapse.api.errors import ( from synapse.events import FrozenEvent from synapse.events.snapshot import EventContext from synapse.http.servlet import RestServlet, parse_json_object_from_request -from synapse.util.async import sleep from synapse.util.caches.response_cache import ResponseCache from synapse.util.metrics import Measure from synapse.types import Requester, UserID @@ -33,11 +32,12 @@ logger = logging.getLogger(__name__) @defer.inlineCallbacks -def send_event_to_master(client, host, port, requester, event, context, +def send_event_to_master(clock, client, host, port, requester, event, context, ratelimit, extra_users): """Send event to be handled on the master Args: + clock (synapse.util.Clock) client (SimpleHttpClient) host (str): host of master port (int): port on master listening for HTTP replication @@ -77,7 +77,7 @@ def send_event_to_master(client, host, port, requester, event, context, # If we timed out we probably don't need to worry about backing # off too much, but lets just wait a little anyway. - yield sleep(1) + yield clock.sleep(1) except MatrixCodeMessageException as e: # We convert to SynapseError as we know that it was a SynapseError # on the master process that we should send to the client. 
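
Replacing the module-level sleep with clock.sleep matters because Clock (rewritten below in synapse/util/__init__.py) schedules on whatever reactor it was given. Under a deterministic reactor a test can fire a pending sleep by advancing time, roughly as follows (task.Clock stands in for a full reactor here):

    from twisted.internet import task

    from synapse.util import Clock

    fake_reactor = task.Clock()   # a reactor whose time the test controls
    clock = Clock(fake_reactor)

    d = clock.sleep(1)
    assert not d.called           # nothing fires until time advances
    fake_reactor.advance(1)       # runs the pending callLater
    assert d.called
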
(And diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 2ac767d2dc..218ba7a083 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -58,6 +58,7 @@ UPDATE_RECENTLY_ACCESSED_TS = 60 * 1000 class MediaRepository(object): def __init__(self, hs): + self.hs = hs self.auth = hs.get_auth() self.client = MatrixFederationHttpClient(hs) self.clock = hs.get_clock() @@ -94,7 +95,7 @@ class MediaRepository(object): storage_providers.append(provider) self.media_storage = MediaStorage( - self.primary_base_path, self.filepaths, storage_providers, + self.hs, self.primary_base_path, self.filepaths, storage_providers, ) self.clock.looping_call( diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index d23fe10b07..d6b8ebbedb 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -37,13 +37,15 @@ class MediaStorage(object): """Responsible for storing/fetching files from local sources. Args: + hs (synapse.server.Homeserver) local_media_directory (str): Base path where we store media on disk filepaths (MediaFilePaths) storage_providers ([StorageProvider]): List of StorageProvider that are used to fetch and store files. """ - def __init__(self, local_media_directory, filepaths, storage_providers): + def __init__(self, hs, local_media_directory, filepaths, storage_providers): + self.hs = hs self.local_media_directory = local_media_directory self.filepaths = filepaths self.storage_providers = storage_providers @@ -175,7 +177,8 @@ class MediaStorage(object): res = yield provider.fetch(path, file_info) if res: with res: - consumer = BackgroundFileConsumer(open(local_path, "w")) + consumer = BackgroundFileConsumer( + open(local_path, "w"), self.hs.get_reactor()) yield res.write_to_consumer(consumer) yield consumer.wait() defer.returnValue(local_path) diff --git a/synapse/server.py b/synapse/server.py index 58dbf78437..c29c19289a 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -40,7 +40,7 @@ from synapse.federation.transport.client import TransportLayerClient from synapse.federation.transaction_queue import TransactionQueue from synapse.handlers import Handlers from synapse.handlers.appservice import ApplicationServicesHandler -from synapse.handlers.auth import AuthHandler, MacaroonGeneartor +from synapse.handlers.auth import AuthHandler, MacaroonGenerator from synapse.handlers.deactivate_account import DeactivateAccountHandler from synapse.handlers.devicemessage import DeviceMessageHandler from synapse.handlers.device import DeviceHandler @@ -165,15 +165,19 @@ class HomeServer(object): 'server_notices_sender', ] - def __init__(self, hostname, **kwargs): + def __init__(self, hostname, reactor=None, **kwargs): """ Args: hostname : The hostname for the server. """ + if not reactor: + from twisted.internet import reactor + + self._reactor = reactor self.hostname = hostname self._building = {} - self.clock = Clock() + self.clock = Clock(reactor) self.distributor = Distributor() self.ratelimiter = Ratelimiter() @@ -186,6 +190,12 @@ class HomeServer(object): self.datastore = DataStore(self.get_db_conn(), self) logger.info("Finished setting up.") + def get_reactor(self): + """ + Fetch the Twisted reactor in use by this HomeServer. + """ + return self._reactor + def get_ip_from_request(self, request): # X-Forwarded-For is handled by our custom request type. 
return request.getClientIP() @@ -261,7 +271,7 @@ class HomeServer(object): return AuthHandler(self) def build_macaroon_generator(self): - return MacaroonGeneartor(self) + return MacaroonGenerator(self) def build_device_handler(self): return DeviceHandler(self) @@ -328,6 +338,7 @@ class HomeServer(object): return adbapi.ConnectionPool( name, + cp_reactor=self.get_reactor(), **self.db_config.get("args", {}) ) diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 8af325a9f5..b7e9c716c8 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import synapse.util.async from ._base import SQLBaseStore from . import engines @@ -92,7 +91,7 @@ class BackgroundUpdateStore(SQLBaseStore): logger.info("Starting background schema updates") while True: - yield synapse.util.async.sleep( + yield self.hs.get_clock().sleep( self.BACKGROUND_UPDATE_INTERVAL_MS / 1000.) try: diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py index ce338514e8..968d2fed22 100644 --- a/synapse/storage/client_ips.py +++ b/synapse/storage/client_ips.py @@ -15,7 +15,7 @@ import logging -from twisted.internet import defer, reactor +from twisted.internet import defer from ._base import Cache from . import background_updates @@ -70,7 +70,9 @@ class ClientIpStore(background_updates.BackgroundUpdateStore): self._client_ip_looper = self._clock.looping_call( self._update_client_ips_batch, 5 * 1000 ) - reactor.addSystemEventTrigger("before", "shutdown", self._update_client_ips_batch) + self.hs.get_reactor().addSystemEventTrigger( + "before", "shutdown", self._update_client_ips_batch + ) def insert_client_ip(self, user_id, access_token, ip, user_agent, device_id, now=None): diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index d0350ee5fe..c4a0208ce4 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -16,7 +16,6 @@ from synapse.storage._base import SQLBaseStore, LoggingTransaction from twisted.internet import defer -from synapse.util.async import sleep from synapse.util.caches.descriptors import cachedInlineCallbacks import logging @@ -800,7 +799,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore): ) if caught_up: break - yield sleep(5) + yield self.hs.get_clock().sleep(5) finally: self._doing_notif_rotation = False diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 32d9d00ffb..38fcf7d444 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -14,7 +14,7 @@ # limitations under the License. 
from ._base import SQLBaseStore -from twisted.internet import defer, reactor +from twisted.internet import defer from synapse.events import FrozenEvent from synapse.events.utils import prune_event @@ -265,7 +265,7 @@ class EventsWorkerStore(SQLBaseStore): except Exception: logger.exception("Failed to callback") with PreserveLoggingContext(): - reactor.callFromThread(fire, event_list, row_dict) + self.hs.get_reactor().callFromThread(fire, event_list, row_dict) except Exception as e: logger.exception("do_fetch") @@ -278,7 +278,7 @@ class EventsWorkerStore(SQLBaseStore): if event_list: with PreserveLoggingContext(): - reactor.callFromThread(fire, event_list) + self.hs.get_reactor().callFromThread(fire, event_list) @defer.inlineCallbacks def _enqueue_events(self, events, check_redacted=True, allow_rejected=False): diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index fc11e26623..2a3df7c71d 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -13,15 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.util.logcontext import PreserveLoggingContext - -from twisted.internet import defer, reactor, task - -import time import logging - from itertools import islice +import attr +from twisted.internet import defer, task + +from synapse.util.logcontext import PreserveLoggingContext + logger = logging.getLogger(__name__) @@ -31,16 +30,24 @@ def unwrapFirstError(failure): return failure.value.subFailure +@attr.s class Clock(object): - """A small utility that obtains current time-of-day so that time may be - mocked during unit-tests. - - TODO(paul): Also move the sleep() functionality into it """ + A Clock wraps a Twisted reactor and provides utilities on top of it. + """ + _reactor = attr.ib() + + @defer.inlineCallbacks + def sleep(self, seconds): + d = defer.Deferred() + with PreserveLoggingContext(): + self._reactor.callLater(seconds, d.callback, seconds) + res = yield d + defer.returnValue(res) def time(self): """Returns the current system time in seconds since epoch.""" - return time.time() + return self._reactor.seconds() def time_msec(self): """Returns the current system time in miliseconds since epoch.""" @@ -56,6 +63,7 @@ class Clock(object): msec(float): How long to wait between calls in milliseconds. """ call = task.LoopingCall(f) + call.clock = self._reactor call.start(msec / 1000.0, now=False) return call @@ -73,7 +81,7 @@ class Clock(object): callback(*args, **kwargs) with PreserveLoggingContext(): - return reactor.callLater(delay, wrapped_callback, *args, **kwargs) + return self._reactor.callLater(delay, wrapped_callback, *args, **kwargs) def cancel_call_later(self, timer, ignore_errs=False): try: diff --git a/synapse/util/async.py b/synapse/util/async.py index b8e57efc54..1668df4ce6 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -13,14 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from twisted.internet import defer, reactor +from twisted.internet import defer from twisted.internet.defer import CancelledError from twisted.python import failure from .logcontext import ( PreserveLoggingContext, make_deferred_yieldable, run_in_background ) -from synapse.util import logcontext, unwrapFirstError +from synapse.util import logcontext, unwrapFirstError, Clock from contextlib import contextmanager @@ -31,15 +31,6 @@ from six.moves import range logger = logging.getLogger(__name__) -@defer.inlineCallbacks -def sleep(seconds): - d = defer.Deferred() - with PreserveLoggingContext(): - reactor.callLater(seconds, d.callback, seconds) - res = yield d - defer.returnValue(res) - - class ObservableDeferred(object): """Wraps a deferred object so that we can add observer deferreds. These observer deferreds do not affect the callback chain of the original @@ -172,13 +163,18 @@ class Linearizer(object): # do some work. """ - def __init__(self, name=None): + def __init__(self, name=None, clock=None): if name is None: self.name = id(self) else: self.name = name self.key_to_defer = {} + if not clock: + from twisted.internet import reactor + clock = Clock(reactor) + self._clock = clock + @defer.inlineCallbacks def queue(self, key): # If there is already a deferred in the queue, we pull it out so that @@ -219,7 +215,7 @@ class Linearizer(object): # the context manager, but it needs to happen while we hold the # lock, and the context manager's exit code must be synchronous, # so actually this is the only sensible place. - yield sleep(0) + yield self._clock.sleep(0) else: logger.info("Acquired uncontended linearizer lock %r for key %r", @@ -396,7 +392,7 @@ class DeferredTimeoutError(Exception): """ -def add_timeout_to_deferred(deferred, timeout, on_timeout_cancel=None): +def add_timeout_to_deferred(deferred, timeout, reactor, on_timeout_cancel=None): """ Add a timeout to a deferred by scheduling it to be cancelled after timeout seconds. @@ -411,6 +407,7 @@ def add_timeout_to_deferred(deferred, timeout, on_timeout_cancel=None): Args: deferred (defer.Deferred): deferred to be timed out timeout (Number): seconds to time out after + reactor (twisted.internet.reactor): the Twisted reactor to use on_timeout_cancel (callable): A callable which is called immediately after the deferred times out, and not if this deferred is diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py index 3380970e4e..c78801015b 100644 --- a/synapse/util/file_consumer.py +++ b/synapse/util/file_consumer.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import threads, reactor +from twisted.internet import threads from synapse.util.logcontext import make_deferred_yieldable, run_in_background @@ -27,6 +27,7 @@ class BackgroundFileConsumer(object): Args: file_obj (file): The file like object to write to. Closed when finished. 
+ reactor (twisted.internet.reactor): the Twisted reactor to use """ # For PushProducers pause if we have this many unwritten slices @@ -34,9 +35,11 @@ class BackgroundFileConsumer(object): # And resume once the size of the queue is less than this _RESUME_ON_QUEUE_SIZE = 2 - def __init__(self, file_obj): + def __init__(self, file_obj, reactor): self._file_obj = file_obj + self._reactor = reactor + # Producer we're registered with self._producer = None @@ -71,7 +74,10 @@ class BackgroundFileConsumer(object): self._producer = producer self.streaming = streaming self._finished_deferred = run_in_background( - threads.deferToThread, self._writer + threads.deferToThreadPool, + self._reactor, + self._reactor.getThreadPool(), + self._writer, ) if not streaming: self._producer.resumeProducing() @@ -109,7 +115,7 @@ class BackgroundFileConsumer(object): # producer. if self._producer and self._paused_producer: if self._bytes_queue.qsize() <= self._RESUME_ON_QUEUE_SIZE: - reactor.callFromThread(self._resume_paused_producer) + self._reactor.callFromThread(self._resume_paused_producer) bytes = self._bytes_queue.get() @@ -121,7 +127,7 @@ class BackgroundFileConsumer(object): # If its a pull producer then we need to explicitly ask for # more stuff. if not self.streaming and self._producer: - reactor.callFromThread(self._producer.resumeProducing) + self._reactor.callFromThread(self._producer.resumeProducing) except Exception as e: self._write_exception = e raise diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 0ab63c3d7d..c5a45cef7c 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -17,7 +17,6 @@ from twisted.internet import defer from synapse.api.errors import LimitExceededError -from synapse.util.async import sleep from synapse.util.logcontext import ( run_in_background, make_deferred_yieldable, PreserveLoggingContext, @@ -153,7 +152,7 @@ class _PerHostRatelimiter(object): "Ratelimit [%s]: sleeping req", id(request_id), ) - ret_defer = run_in_background(sleep, self.sleep_msec / 1000.0) + ret_defer = run_in_background(self.clock.sleep, self.sleep_msec / 1000.0) self.sleeping_requests.add(request_id) diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 149e443022..cc1c862ba4 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -19,10 +19,10 @@ import signedjson.sign from mock import Mock from synapse.api.errors import SynapseError from synapse.crypto import keyring -from synapse.util import async, logcontext +from synapse.util import logcontext, Clock from synapse.util.logcontext import LoggingContext from tests import unittest, utils -from twisted.internet import defer +from twisted.internet import defer, reactor class MockPerspectiveServer(object): @@ -118,6 +118,7 @@ class KeyringTestCase(unittest.TestCase): @defer.inlineCallbacks def test_verify_json_objects_for_server_awaits_previous_requests(self): + clock = Clock(reactor) key1 = signedjson.key.generate_signing_key(1) kr = keyring.Keyring(self.hs) @@ -167,7 +168,7 @@ class KeyringTestCase(unittest.TestCase): # wait a tick for it to send the request to the perspectives server # (it first tries the datastore) - yield async.sleep(1) # XXX find out why this takes so long! + yield clock.sleep(1) # XXX find out why this takes so long! 
self.http_client.post_json.assert_called_once() self.assertIs(LoggingContext.current_context(), context_11) @@ -183,7 +184,7 @@ class KeyringTestCase(unittest.TestCase): res_deferreds_2 = kr.verify_json_objects_for_server( [("server10", json1)], ) - yield async.sleep(1) + yield clock.sleep(1) self.http_client.post_json.assert_not_called() res_deferreds_2[0].addBoth(self.check_context, None) diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index b5bc2fa255..6a757289db 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -1,9 +1,9 @@ from synapse.rest.client.transactions import HttpTransactionCache from synapse.rest.client.transactions import CLEANUP_PERIOD_MS -from twisted.internet import defer +from twisted.internet import defer, reactor from mock import Mock, call -from synapse.util import async +from synapse.util import Clock from synapse.util.logcontext import LoggingContext from tests import unittest from tests.utils import MockClock @@ -46,7 +46,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase): def test_logcontexts_with_async_result(self): @defer.inlineCallbacks def cb(): - yield async.sleep(0) + yield Clock(reactor).sleep(0) defer.returnValue("yay") @defer.inlineCallbacks diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py index eef38b6781..c5e2f5549a 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py @@ -14,7 +14,7 @@ # limitations under the License. -from twisted.internet import defer +from twisted.internet import defer, reactor from synapse.rest.media.v1._base import FileInfo from synapse.rest.media.v1.media_storage import MediaStorage @@ -38,6 +38,7 @@ class MediaStorageTests(unittest.TestCase): self.secondary_base_path = os.path.join(self.test_dir, "secondary") hs = Mock() + hs.get_reactor = Mock(return_value=reactor) hs.config.media_store_path = self.primary_base_path storage_providers = [FileStorageProviderBackend( @@ -46,7 +47,7 @@ class MediaStorageTests(unittest.TestCase): self.filepaths = MediaFilePaths(self.primary_base_path) self.media_storage = MediaStorage( - self.primary_base_path, self.filepaths, storage_providers, + hs, self.primary_base_path, self.filepaths, storage_providers, ) def tearDown(self): diff --git a/tests/util/test_file_consumer.py b/tests/util/test_file_consumer.py index d6e1082779..c2aae8f54c 100644 --- a/tests/util/test_file_consumer.py +++ b/tests/util/test_file_consumer.py @@ -30,7 +30,7 @@ class FileConsumerTests(unittest.TestCase): @defer.inlineCallbacks def test_pull_consumer(self): string_file = StringIO() - consumer = BackgroundFileConsumer(string_file) + consumer = BackgroundFileConsumer(string_file, reactor=reactor) try: producer = DummyPullProducer() @@ -54,7 +54,7 @@ class FileConsumerTests(unittest.TestCase): @defer.inlineCallbacks def test_push_consumer(self): string_file = BlockingStringWrite() - consumer = BackgroundFileConsumer(string_file) + consumer = BackgroundFileConsumer(string_file, reactor=reactor) try: producer = NonCallableMock(spec_set=[]) @@ -80,7 +80,7 @@ class FileConsumerTests(unittest.TestCase): @defer.inlineCallbacks def test_push_producer_feedback(self): string_file = BlockingStringWrite() - consumer = BackgroundFileConsumer(string_file) + consumer = BackgroundFileConsumer(string_file, reactor=reactor) try: producer = NonCallableMock(spec_set=["pauseProducing", "resumeProducing"]) diff --git 
a/tests/util/test_linearizer.py b/tests/util/test_linearizer.py index 4865eb4bc6..bf7e3aa885 100644 --- a/tests/util/test_linearizer.py +++ b/tests/util/test_linearizer.py @@ -12,10 +12,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from synapse.util import async, logcontext + +from synapse.util import logcontext, Clock from tests import unittest -from twisted.internet import defer +from twisted.internet import defer, reactor from synapse.util.async import Linearizer from six.moves import range @@ -53,7 +54,7 @@ class LinearizerTestCase(unittest.TestCase): self.assertEqual( logcontext.LoggingContext.current_context(), lc) if sleep: - yield async.sleep(0) + yield Clock(reactor).sleep(0) self.assertEqual( logcontext.LoggingContext.current_context(), lc) diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py index ad78d884e0..9cf90fcfc4 100644 --- a/tests/util/test_logcontext.py +++ b/tests/util/test_logcontext.py @@ -3,8 +3,7 @@ from twisted.internet import defer from twisted.internet import reactor from .. import unittest -from synapse.util.async import sleep -from synapse.util import logcontext +from synapse.util import logcontext, Clock from synapse.util.logcontext import LoggingContext @@ -22,18 +21,20 @@ class LoggingContextTestCase(unittest.TestCase): @defer.inlineCallbacks def test_sleep(self): + clock = Clock(reactor) + @defer.inlineCallbacks def competing_callback(): with LoggingContext() as competing_context: competing_context.request = "competing" - yield sleep(0) + yield clock.sleep(0) self._check_test_key("competing") reactor.callLater(0, competing_callback) with LoggingContext() as context_one: context_one.request = "one" - yield sleep(0) + yield clock.sleep(0) self._check_test_key("one") def _test_run_in_background(self, function): @@ -87,7 +88,7 @@ class LoggingContextTestCase(unittest.TestCase): def test_run_in_background_with_blocking_fn(self): @defer.inlineCallbacks def blocking_function(): - yield sleep(0) + yield Clock(reactor).sleep(0) return self._test_run_in_background(blocking_function) diff --git a/tests/utils.py b/tests/utils.py index 262c4a5714..189fd2711c 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -37,11 +37,15 @@ USE_POSTGRES_FOR_TESTS = False @defer.inlineCallbacks -def setup_test_homeserver(name="test", datastore=None, config=None, **kargs): +def setup_test_homeserver(name="test", datastore=None, config=None, reactor=None, + **kargs): """Setup a homeserver suitable for running tests against. Keyword arguments are passed to the Homeserver constructor. If no datastore is supplied a datastore backed by an in-memory sqlite db will be given to the HS. 
""" + if reactor is None: + from twisted.internet import reactor + if config is None: config = Mock() config.signing_key = [MockKey()] @@ -110,6 +114,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs): database_engine=db_engine, room_list_handler=object(), tls_server_context_factory=Mock(), + reactor=reactor, **kargs ) db_conn = hs.get_db_conn() -- cgit 1.5.1 From 0495fe00350a3610cbfd9b0097ff1a8a1c31f5f4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 22 Jun 2018 10:42:28 +0100 Subject: Indirect evt_count updates via method call so that we can stub it for the sentinel and not have a billion failing UTs --- synapse/storage/events_worker.py | 2 +- synapse/util/logcontext.py | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) (limited to 'synapse/util') diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index cf4efa9d12..3a634842ca 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -147,7 +147,7 @@ class EventsWorkerStore(SQLBaseStore): if missing_events_ids: log_ctx = LoggingContext.current_context() - log_ctx.evt_db_fetch_count += len(missing_events_ids) + log_ctx.record_event_fetch(len(missing_events_ids)) missing_events = yield self._enqueue_events( missing_events_ids, diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 2bfd4bee96..df2b71b791 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -91,6 +91,9 @@ class LoggingContext(object): def add_database_scheduled(self, sched_sec): pass + def record_event_fetch(self, event_count): + pass + def __nonzero__(self): return False __bool__ = __nonzero__ # python3 @@ -247,6 +250,14 @@ class LoggingContext(object): """ self.db_sched_duration_sec += sched_sec + def record_event_fetch(self, event_count): + """Record a number of events being fetched from the db + + Args: + event_count (int): number of events being fetched + """ + self.evt_db_fetch_count += event_count + class LoggingContextFilter(logging.Filter): """Logging filter that adds values from the current logging context to each -- cgit 1.5.1 From 43e02c409d163700a293ae67015584699d557c3c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 11 Jun 2018 23:13:06 +0100 Subject: Disable partial state group caching for wildcard lookups When _get_state_for_groups is given a wildcard filter, just do a complete lookup. Hopefully this will give us the best of both worlds by not filling up the ram if we only need one or two keys, but also making the cache still work for the federation reader usecase. --- synapse/storage/state.py | 56 +++++++++++++++++++++++++-------- synapse/util/caches/dictionary_cache.py | 25 +++++++-------- tests/util/test_dict_cache.py | 12 +++---- 3 files changed, 61 insertions(+), 32 deletions(-) (limited to 'synapse/util') diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 986a20400c..cd9821c270 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -526,10 +526,23 @@ class StateGroupWorkerStore(SQLBaseStore): @defer.inlineCallbacks def _get_state_for_groups(self, groups, types=None): - """Given list of groups returns dict of group -> list of state events - with matching types. `types` is a list of `(type, state_key)`, where - a `state_key` of None matches all state_keys. If `types` is None then - all events are returned. 
+ """Gets the state at each of a list of state groups, optionally + filtering by type/state_key + + Args: + groups (iterable[int]): list of state groups for which we want + to get the state. + types (None|iterable[(str, None|str)]): + indicates the state type/keys required. If None, the whole + state is fetched and returned. + + Otherwise, each entry should be a `(type, state_key)` tuple to + include in the response. A `state_key` of None is a wildcard + meaning that we require all state with that type. + + Returns: + Deferred[dict[int, dict[(type, state_key), EventBase]]] + a dictionary mapping from state group to state dictionary. """ if types: types = frozenset(types) @@ -538,7 +551,7 @@ class StateGroupWorkerStore(SQLBaseStore): if types is not None: for group in set(groups): state_dict_ids, _, got_all = self._get_some_state_from_cache( - group, types + group, types, ) results[group] = state_dict_ids @@ -559,22 +572,40 @@ class StateGroupWorkerStore(SQLBaseStore): # Okay, so we have some missing_types, lets fetch them. cache_seq_num = self._state_group_cache.sequence + # the DictionaryCache knows if it has *all* the state, but + # does not know if it has all of the keys of a particular type, + # which makes wildcard lookups expensive unless we have a complete + # cache. Hence, if we are doing a wildcard lookup, populate the + # cache fully so that we can do an efficient lookup next time. + + if types and any(k is None for (t, k) in types): + types_to_fetch = None + else: + types_to_fetch = types + group_to_state_dict = yield self._get_state_groups_from_groups( - missing_groups, types + missing_groups, types_to_fetch, ) - # Now we want to update the cache with all the things we fetched - # from the database. for group, group_state_dict in iteritems(group_to_state_dict): state_dict = results[group] - state_dict.update(group_state_dict) + # update the result, filtering by `types`. + if types: + for k, v in iteritems(group_state_dict): + (typ, _) = k + if k in types or (typ, None) in types: + state_dict[k] = v + else: + state_dict.update(group_state_dict) + + # update the cache with all the things we fetched from the + # database. self._state_group_cache.update( cache_seq_num, key=group, - value=state_dict, - full=(types is None), - known_absent=types, + value=group_state_dict, + fetched_keys=types_to_fetch, ) defer.returnValue(results) @@ -681,7 +712,6 @@ class StateGroupWorkerStore(SQLBaseStore): self._state_group_cache.sequence, key=state_group, value=dict(current_state_ids), - full=True, ) return state_group diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py index bdc21e348f..95793d466d 100644 --- a/synapse/util/caches/dictionary_cache.py +++ b/synapse/util/caches/dictionary_cache.py @@ -107,29 +107,28 @@ class DictionaryCache(object): self.sequence += 1 self.cache.clear() - def update(self, sequence, key, value, full=False, known_absent=None): + def update(self, sequence, key, value, fetched_keys=None): """Updates the entry in the cache Args: sequence - key - value (dict): The value to update the cache with. - full (bool): Whether the given value is the full dict, or just a - partial subset there of. If not full then any existing entries - for the key will be updated. - known_absent (set): Set of keys that we know don't exist in the full - dict. + key (K) + value (dict[X,Y]): The value to update the cache with. + fetched_keys (None|set[X]): All of the dictionary keys which were + fetched from the database. 
+ + If None, this is the complete value for key K. Otherwise, it + is used to infer a list of keys which we know don't exist in + the full dict. """ self.check_thread() if self.sequence == sequence: # Only update the cache if the caches sequence number matches the # number that the cache had before the SELECT was started (SYN-369) - if known_absent is None: - known_absent = set() - if full: - self._insert(key, value, known_absent) + if fetched_keys is None: + self._insert(key, value, set()) else: - self._update_or_insert(key, value, known_absent) + self._update_or_insert(key, value, fetched_keys) def _update_or_insert(self, key, value, known_absent): # We pop and reinsert as we need to tell the cache the size may have diff --git a/tests/util/test_dict_cache.py b/tests/util/test_dict_cache.py index bc92f85fa6..543ac5bed9 100644 --- a/tests/util/test_dict_cache.py +++ b/tests/util/test_dict_cache.py @@ -32,7 +32,7 @@ class DictCacheTestCase(unittest.TestCase): seq = self.cache.sequence test_value = {"test": "test_simple_cache_hit_full"} - self.cache.update(seq, key, test_value, full=True) + self.cache.update(seq, key, test_value) c = self.cache.get(key) self.assertEqual(test_value, c.value) @@ -44,7 +44,7 @@ class DictCacheTestCase(unittest.TestCase): test_value = { "test": "test_simple_cache_hit_partial" } - self.cache.update(seq, key, test_value, full=True) + self.cache.update(seq, key, test_value) c = self.cache.get(key, ["test"]) self.assertEqual(test_value, c.value) @@ -56,7 +56,7 @@ class DictCacheTestCase(unittest.TestCase): test_value = { "test": "test_simple_cache_miss_partial" } - self.cache.update(seq, key, test_value, full=True) + self.cache.update(seq, key, test_value) c = self.cache.get(key, ["test2"]) self.assertEqual({}, c.value) @@ -70,7 +70,7 @@ class DictCacheTestCase(unittest.TestCase): "test2": "test_simple_cache_hit_miss_partial2", "test3": "test_simple_cache_hit_miss_partial3", } - self.cache.update(seq, key, test_value, full=True) + self.cache.update(seq, key, test_value) c = self.cache.get(key, ["test2"]) self.assertEqual({"test2": "test_simple_cache_hit_miss_partial2"}, c.value) @@ -82,13 +82,13 @@ class DictCacheTestCase(unittest.TestCase): test_value_1 = { "test": "test_simple_cache_hit_miss_partial", } - self.cache.update(seq, key, test_value_1, full=False) + self.cache.update(seq, key, test_value_1, fetched_keys=set("test")) seq = self.cache.sequence test_value_2 = { "test2": "test_simple_cache_hit_miss_partial2", } - self.cache.update(seq, key, test_value_2, full=False) + self.cache.update(seq, key, test_value_2, fetched_keys=set("test2")) c = self.cache.get(key) self.assertEqual( -- cgit 1.5.1 From 07cad26d65dcf237643a43ff2bdfac73ca7a7257 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 25 Jun 2018 14:08:28 +0100 Subject: Remove all global reactor imports & pass it around explicitly (#3424) --- synapse/handlers/deactivate_account.py | 4 ++-- synapse/handlers/message.py | 4 ++-- synapse/handlers/presence.py | 4 ++-- synapse/http/endpoint.py | 16 +++++++++------- synapse/push/emailpusher.py | 4 ++-- synapse/push/httppusher.py | 6 ++++-- synapse/replication/tcp/client.py | 6 +++--- synapse/replication/tcp/resource.py | 4 ++-- synapse/util/__init__.py | 3 +++ 9 files changed, 29 insertions(+), 22 deletions(-) (limited to 'synapse/util') diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 8ec5ba2012..6116842764 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ 
-12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer, reactor +from twisted.internet import defer from ._base import BaseHandler from synapse.types import UserID, create_requester @@ -39,7 +39,7 @@ class DeactivateAccountHandler(BaseHandler): # Start the user parter loop so it can resume parting users from rooms where # it left off (if it has work left to do). - reactor.callWhenRunning(self._start_user_parting) + hs.get_reactor().callWhenRunning(self._start_user_parting) @defer.inlineCallbacks def deactivate_account(self, user_id): diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 7b9946ab91..a812117dea 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -20,7 +20,7 @@ import sys from canonicaljson import encode_canonical_json import six from six import string_types, itervalues, iteritems -from twisted.internet import defer, reactor +from twisted.internet import defer from twisted.internet.defer import succeed from twisted.python.failure import Failure @@ -157,7 +157,7 @@ class MessageHandler(BaseHandler): # remove the purge from the list 24 hours after it completes def clear_purge(): del self._purges_by_id[purge_id] - reactor.callLater(24 * 3600, clear_purge) + self.hs.get_reactor().callLater(24 * 3600, clear_purge) def get_purge_status(self, purge_id): """Get the current status of an active purge diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 7fe568132f..7db59fba00 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -22,7 +22,7 @@ The methods that define policy are: - should_notify """ -from twisted.internet import defer, reactor +from twisted.internet import defer from contextlib import contextmanager from six import itervalues, iteritems @@ -179,7 +179,7 @@ class PresenceHandler(object): # have not yet been persisted self.unpersisted_users_changes = set() - reactor.addSystemEventTrigger("before", "shutdown", self._on_shutdown) + hs.get_reactor().addSystemEventTrigger("before", "shutdown", self._on_shutdown) self.serial_to_user = {} self._next_serial = 1 diff --git a/synapse/http/endpoint.py b/synapse/http/endpoint.py index 87a482650d..928c1c7407 100644 --- a/synapse/http/endpoint.py +++ b/synapse/http/endpoint.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
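
The endpoint.py hunk that follows applies the same recipe as the handler changes above: drop the module-level reactor import and accept the reactor as an argument. A minimal sketch of the pattern, assuming only the hs.get_reactor() accessor used throughout this commit (the class and method names here are illustrative, not part of the patch):

    from twisted.internet import defer

    class DelayedRetrySketch(object):
        # Illustrative sketch: schedule work on an explicitly passed reactor.
        def __init__(self, hs):
            self.hs = hs

        def schedule_retry(self, delay_seconds, callback):
            # Use the homeserver's reactor rather than a global import, so a
            # test can construct this object around a fake reactor/clock.
            return self.hs.get_reactor().callLater(delay_seconds, callback)
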
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS -from twisted.internet import defer, reactor +from twisted.internet import defer from twisted.internet.error import ConnectError from twisted.names import client, dns from twisted.names.error import DNSNameError, DomainError @@ -78,17 +78,18 @@ def matrix_federation_endpoint(reactor, destination, ssl_context_factory=None, else: return _WrappingEndpointFac(transport_endpoint( reactor, domain, port, **endpoint_kw_args - )) + ), reactor) class _WrappingEndpointFac(object): - def __init__(self, endpoint_fac): + def __init__(self, endpoint_fac, reactor): self.endpoint_fac = endpoint_fac + self.reactor = reactor @defer.inlineCallbacks def connect(self, protocolFactory): conn = yield self.endpoint_fac.connect(protocolFactory) - conn = _WrappedConnection(conn) + conn = _WrappedConnection(conn, self.reactor) defer.returnValue(conn) @@ -98,9 +99,10 @@ class _WrappedConnection(object): """ __slots__ = ["conn", "last_request"] - def __init__(self, conn): + def __init__(self, conn, reactor): object.__setattr__(self, "conn", conn) object.__setattr__(self, "last_request", time.time()) + self._reactor = reactor def __getattr__(self, name): return getattr(self.conn, name) @@ -131,14 +133,14 @@ class _WrappedConnection(object): # Time this connection out if we haven't send a request in the last # N minutes # TODO: Cancel the previous callLater? - reactor.callLater(3 * 60, self._time_things_out_maybe) + self._reactor.callLater(3 * 60, self._time_things_out_maybe) d = self.conn.request(request) def update_request_time(res): self.last_request = time.time() # TODO: Cancel the previous callLater? - reactor.callLater(3 * 60, self._time_things_out_maybe) + self._reactor.callLater(3 * 60, self._time_things_out_maybe) return res d.addCallback(update_request_time) diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index ba7286cb72..52d4f087ee 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer, reactor +from twisted.internet import defer from twisted.internet.error import AlreadyCalled, AlreadyCancelled import logging @@ -199,7 +199,7 @@ class EmailPusher(object): self.timed_call = None if soonest_due_at is not None: - self.timed_call = reactor.callLater( + self.timed_call = self.hs.get_reactor().callLater( self.seconds_until(soonest_due_at), self.on_timer ) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index bf7ff74a1a..7a481b5a1e 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -15,7 +15,7 @@ # limitations under the License. import logging -from twisted.internet import defer, reactor +from twisted.internet import defer from twisted.internet.error import AlreadyCalled, AlreadyCancelled from . 
import push_rule_evaluator @@ -220,7 +220,9 @@ class HttpPusher(object): ) else: logger.info("Push failed: delaying for %ds", self.backoff_delay) - self.timed_call = reactor.callLater(self.backoff_delay, self.on_timer) + self.timed_call = self.hs.get_reactor().callLater( + self.backoff_delay, self.on_timer + ) self.backoff_delay = min(self.backoff_delay * 2, self.MAX_BACKOFF_SEC) break diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 6d2513c4e2..bb852b00af 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -15,7 +15,7 @@ """A replication client for use by synapse workers. """ -from twisted.internet import reactor, defer +from twisted.internet import defer from twisted.internet.protocol import ReconnectingClientFactory from .commands import ( @@ -44,7 +44,7 @@ class ReplicationClientFactory(ReconnectingClientFactory): self.server_name = hs.config.server_name self._clock = hs.get_clock() # As self.clock is defined in super class - reactor.addSystemEventTrigger("before", "shutdown", self.stopTrying) + hs.get_reactor().addSystemEventTrigger("before", "shutdown", self.stopTrying) def startedConnecting(self, connector): logger.info("Connecting to replication: %r", connector.getDestination()) @@ -95,7 +95,7 @@ class ReplicationClientHandler(object): factory = ReplicationClientFactory(hs, client_name, self) host = hs.config.worker_replication_host port = hs.config.worker_replication_port - reactor.connectTCP(host, port, factory) + hs.get_reactor().connectTCP(host, port, factory) def on_rdata(self, stream_name, token, rows): """Called when we get new replication data. By default this just pokes diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 63bd6d2652..95ad8c1b4c 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -15,7 +15,7 @@ """The server side of the replication stream. """ -from twisted.internet import defer, reactor +from twisted.internet import defer from twisted.internet.protocol import Factory from .streams import STREAMS_MAP, FederationStream @@ -109,7 +109,7 @@ class ReplicationStreamer(object): self.is_looping = False self.pending_updates = False - reactor.addSystemEventTrigger("before", "shutdown", self.on_shutdown) + hs.get_reactor().addSystemEventTrigger("before", "shutdown", self.on_shutdown) def on_shutdown(self): # close all connections on shutdown diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 2a3df7c71d..e9886ef299 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -34,6 +34,9 @@ def unwrapFirstError(failure): class Clock(object): """ A Clock wraps a Twisted reactor and provides utilities on top of it. + + Args: + reactor: The Twisted reactor to use. 
""" _reactor = attr.ib() -- cgit 1.5.1 From abfe4b2957645194c59da359779a4d77dcc6f493 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 26 Jun 2018 17:25:34 +0100 Subject: try and make loading items from the cache faster --- synapse/util/caches/stream_change_cache.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 817118e30f..c81518c6bc 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -78,7 +78,8 @@ class StreamChangeCache(object): not_known_entities = set(entities) - set(self._entity_to_key) result = ( - set(self._cache.values()[self._cache.bisect_right(stream_pos) :]) + {self._cache[k] for k in in self._cache.islice( + start=self._cache.bisect_right(stream_pos))} .intersection(entities) .union(not_known_entities) ) @@ -113,7 +114,8 @@ class StreamChangeCache(object): assert type(stream_pos) is int if stream_pos >= self._earliest_known_stream_pos: - return self._cache.values()[self._cache.bisect_right(stream_pos) :] + return {self._cache[k] for k in in self._cache.islice( + start=self._cache.bisect_right(stream_pos))} else: return None -- cgit 1.5.1 From bd3d329c88a59fa1adff8354d7e7105e721e2e9e Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 26 Jun 2018 17:28:12 +0100 Subject: fixes --- changelog.d/3447.misc | 0 synapse/util/caches/stream_change_cache.py | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) create mode 100644 changelog.d/3447.misc (limited to 'synapse/util') diff --git a/changelog.d/3447.misc b/changelog.d/3447.misc new file mode 100644 index 0000000000..e69de29bb2 diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index c81518c6bc..b19feb3b11 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -78,7 +78,7 @@ class StreamChangeCache(object): not_known_entities = set(entities) - set(self._entity_to_key) result = ( - {self._cache[k] for k in in self._cache.islice( + {self._cache[k] for k in self._cache.islice( start=self._cache.bisect_right(stream_pos))} .intersection(entities) .union(not_known_entities) @@ -114,7 +114,7 @@ class StreamChangeCache(object): assert type(stream_pos) is int if stream_pos >= self._earliest_known_stream_pos: - return {self._cache[k] for k in in self._cache.islice( + return {self._cache[k] for k in self._cache.islice( start=self._cache.bisect_right(stream_pos))} else: return None -- cgit 1.5.1 From 1202508067d392cbc71e1c9e0b4d9eca3494143a Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 26 Jun 2018 17:29:01 +0100 Subject: fixes --- synapse/util/caches/stream_change_cache.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index b19feb3b11..0fb8620001 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -114,8 +114,8 @@ class StreamChangeCache(object): assert type(stream_pos) is int if stream_pos >= self._earliest_known_stream_pos: - return {self._cache[k] for k in self._cache.islice( - start=self._cache.bisect_right(stream_pos))} + return [self._cache[k] for k in self._cache.islice( + start=self._cache.bisect_right(stream_pos))] else: return None -- cgit 1.5.1 From 8057489b260a19cdd36b2f4ac5b587565ca7aac0 Mon Sep 17 00:00:00 2001 From: 
Matthew Hodgson Date: Tue, 26 Jun 2018 18:09:01 +0100 Subject: Revert "Try to not use as much CPU in the StreamChangeCache" --- changelog.d/3447.misc | 0 synapse/util/caches/stream_change_cache.py | 6 ++---- 2 files changed, 2 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/3447.misc (limited to 'synapse/util') diff --git a/changelog.d/3447.misc b/changelog.d/3447.misc deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 0fb8620001..817118e30f 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -78,8 +78,7 @@ class StreamChangeCache(object): not_known_entities = set(entities) - set(self._entity_to_key) result = ( - {self._cache[k] for k in self._cache.islice( - start=self._cache.bisect_right(stream_pos))} + set(self._cache.values()[self._cache.bisect_right(stream_pos) :]) .intersection(entities) .union(not_known_entities) ) @@ -114,8 +113,7 @@ class StreamChangeCache(object): assert type(stream_pos) is int if stream_pos >= self._earliest_known_stream_pos: - return [self._cache[k] for k in self._cache.islice( - start=self._cache.bisect_right(stream_pos))] + return self._cache.values()[self._cache.bisect_right(stream_pos) :] else: return None -- cgit 1.5.1 From 72d2143ea8fe43464b1db59936acef53293b1b2a Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 28 Jun 2018 11:04:18 +0100 Subject: Revert "Revert "Try to not use as much CPU in the StreamChangeCache"" (#3454) --- changelog.d/3447.misc | 0 synapse/util/caches/stream_change_cache.py | 6 ++++-- 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelog.d/3447.misc (limited to 'synapse/util') diff --git a/changelog.d/3447.misc b/changelog.d/3447.misc new file mode 100644 index 0000000000..e69de29bb2 diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 817118e30f..0fb8620001 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -78,7 +78,8 @@ class StreamChangeCache(object): not_known_entities = set(entities) - set(self._entity_to_key) result = ( - set(self._cache.values()[self._cache.bisect_right(stream_pos) :]) + {self._cache[k] for k in self._cache.islice( + start=self._cache.bisect_right(stream_pos))} .intersection(entities) .union(not_known_entities) ) @@ -113,7 +114,8 @@ class StreamChangeCache(object): assert type(stream_pos) is int if stream_pos >= self._earliest_known_stream_pos: - return self._cache.values()[self._cache.bisect_right(stream_pos) :] + return [self._cache[k] for k in self._cache.islice( + start=self._cache.bisect_right(stream_pos))] else: return None -- cgit 1.5.1 From 6350bf925e8651f2fae70a1e7eb7182e9161c34a Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 28 Jun 2018 14:49:57 +0100 Subject: Attempt to be more performant on PyPy (#3462) --- changelog.d/3462.feature | 1 + synapse/api/errors.py | 3 ++- synapse/api/filtering.py | 3 ++- synapse/crypto/keyclient.py | 2 +- synapse/federation/federation_server.py | 2 +- synapse/handlers/auth.py | 5 +++-- synapse/handlers/e2e_keys.py | 5 ++--- synapse/handlers/identity.py | 2 +- synapse/handlers/message.py | 5 ++--- synapse/http/client.py | 2 +- synapse/http/matrixfederationclient.py | 3 +-- synapse/http/server.py | 5 ++--- synapse/http/servlet.py | 6 ++++-- synapse/metrics/__init__.py | 3 ++- synapse/replication/tcp/commands.py | 16 ++++++++++------ 
synapse/rest/client/v1/login.py | 3 ++- synapse/rest/client/v1/room.py | 2 +- synapse/rest/client/v2_alpha/sync.py | 2 +- synapse/rest/media/v0/content_repository.py | 3 ++- synapse/rest/media/v1/preview_url_resource.py | 3 ++- synapse/storage/account_data.py | 3 ++- synapse/storage/appservice.py | 2 +- synapse/storage/background_updates.py | 3 ++- synapse/storage/deviceinbox.py | 13 +++++++------ synapse/storage/devices.py | 3 ++- synapse/storage/end_to_end_keys.py | 3 +-- synapse/storage/event_push_actions.py | 3 ++- synapse/storage/events.py | 3 ++- synapse/storage/events_worker.py | 3 ++- synapse/storage/filtering.py | 3 +-- synapse/storage/group_server.py | 2 +- synapse/storage/push_rule.py | 3 ++- synapse/storage/pusher.py | 3 +-- synapse/storage/receipts.py | 3 ++- synapse/storage/room.py | 3 ++- synapse/storage/roommember.py | 2 +- synapse/storage/search.py | 2 +- synapse/storage/tags.py | 3 ++- synapse/storage/transactions.py | 3 +-- synapse/util/frozenutils.py | 2 +- 40 files changed, 79 insertions(+), 62 deletions(-) create mode 100644 changelog.d/3462.feature (limited to 'synapse/util') diff --git a/changelog.d/3462.feature b/changelog.d/3462.feature new file mode 100644 index 0000000000..305dbbeddd --- /dev/null +++ b/changelog.d/3462.feature @@ -0,0 +1 @@ +Synapse now uses the best performing JSON encoder/decoder according to your runtime (simplejson on CPython, stdlib json on PyPy). \ No newline at end of file diff --git a/synapse/api/errors.py b/synapse/api/errors.py index e6ad3768f0..227a0713b2 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -17,7 +17,8 @@ import logging -import simplejson as json +from canonicaljson import json + from six import iteritems from six.moves import http_client diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index dbc0e7e445..aae25e7a47 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -17,7 +17,8 @@ from synapse.storage.presence import UserPresenceState from synapse.types import UserID, RoomID from twisted.internet import defer -import simplejson as json +from canonicaljson import json + import jsonschema from jsonschema import FormatChecker diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py index f1fd488b90..2a0eddbea1 100644 --- a/synapse/crypto/keyclient.py +++ b/synapse/crypto/keyclient.py @@ -18,7 +18,7 @@ from twisted.web.http import HTTPClient from twisted.internet.protocol import Factory from twisted.internet import defer, reactor from synapse.http.endpoint import matrix_federation_endpoint -import simplejson as json +from canonicaljson import json import logging diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index d4dd967c60..a00420a24b 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -15,7 +15,7 @@ # limitations under the License. 
import logging -import simplejson as json +from canonicaljson import json from twisted.internet import defer from synapse.api.errors import AuthError, FederationError, SynapseError, NotFoundError diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index a131b7f73f..cbef1f2770 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -16,6 +16,8 @@ from twisted.internet import defer, threads +from canonicaljson import json + from ._base import BaseHandler from synapse.api.constants import LoginType from synapse.api.errors import ( @@ -32,7 +34,6 @@ from twisted.web.client import PartialDownloadError import logging import bcrypt import pymacaroons -import simplejson import attr import synapse.util.stringutils as stringutils @@ -403,7 +404,7 @@ class AuthHandler(BaseHandler): except PartialDownloadError as pde: # Twisted is silly data = pde.response - resp_body = simplejson.loads(data) + resp_body = json.loads(data) if 'success' in resp_body: # Note that we do NOT check the hostname here: we explicitly diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 4d7bf5defd..62b4892a4e 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -14,10 +14,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import simplejson as json import logging -from canonicaljson import encode_canonical_json +from canonicaljson import encode_canonical_json, json from twisted.internet import defer from six import iteritems @@ -357,7 +356,7 @@ def _exception_to_failure(e): # include ConnectionRefused and other errors # # Note that some Exceptions (notably twisted's ResponseFailed etc) don't - # give a string for e.message, which simplejson then fails to serialize. + # give a string for e.message, which json then fails to serialize. return { "status": 503, "message": str(e.message), } diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index f00dfe1d3e..277c2b7760 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -19,7 +19,7 @@ import logging -import simplejson as json +from canonicaljson import json from twisted.internet import defer diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 8467284758..cbadf3c88e 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -14,10 +14,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -import simplejson import sys -from canonicaljson import encode_canonical_json +from canonicaljson import encode_canonical_json, json import six from six import string_types, itervalues, iteritems from twisted.internet import defer @@ -797,7 +796,7 @@ class EventCreationHandler(object): # Ensure that we can round trip before trying to persist in db try: dump = frozendict_json_encoder.encode(event.content) - simplejson.loads(dump) + json.loads(dump) except Exception: logger.exception("Failed to encode content: %r", event.content) raise diff --git a/synapse/http/client.py b/synapse/http/client.py index 46ffb41de1..5bdc484c15 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -42,7 +42,7 @@ from twisted.web._newclient import ResponseDone from six import StringIO from prometheus_client import Counter -import simplejson as json +from canonicaljson import json import logging import urllib diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 4e0399e762..2cb9e3e231 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -27,7 +27,7 @@ from synapse.util import logcontext from synapse.util.logcontext import make_deferred_yieldable import synapse.util.retryutils -from canonicaljson import encode_canonical_json +from canonicaljson import encode_canonical_json, json from synapse.api.errors import ( SynapseError, Codes, HttpResponseException, FederationDeniedError, @@ -36,7 +36,6 @@ from synapse.api.errors import ( from signedjson.sign import sign_json import cgi -import simplejson as json import logging import random import sys diff --git a/synapse/http/server.py b/synapse/http/server.py index bc09b8b2be..517aaf7b5a 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -29,7 +29,7 @@ import synapse.metrics import synapse.events from canonicaljson import ( - encode_canonical_json, encode_pretty_printed_json + encode_canonical_json, encode_pretty_printed_json, json ) from twisted.internet import defer @@ -41,7 +41,6 @@ from twisted.web.util import redirectTo import collections import logging import urllib -import simplejson logger = logging.getLogger(__name__) @@ -410,7 +409,7 @@ def respond_with_json(request, code, json_object, send_cors=False, if canonical_json or synapse.events.USE_FROZEN_DICTS: json_bytes = encode_canonical_json(json_object) else: - json_bytes = simplejson.dumps(json_object) + json_bytes = json.dumps(json_object) return respond_with_json_bytes( request, code, json_bytes, diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index ef8e62901b..ef3a01ddc7 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -18,7 +18,9 @@ from synapse.api.errors import SynapseError, Codes import logging -import simplejson + +from canonicaljson import json + logger = logging.getLogger(__name__) @@ -171,7 +173,7 @@ def parse_json_value_from_request(request, allow_empty_body=False): return None try: - content = simplejson.loads(content_bytes) + content = json.loads(content_bytes) except Exception as e: logger.warn("Unable to parse JSON: %s", e) raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 7d6e0232ed..2d2397caae 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -147,7 +147,8 @@ class GCCounts(object): yield cm -REGISTRY.register(GCCounts()) +if not running_on_pypy: + REGISTRY.register(GCCounts()) # # Twisted reactor 
metrics diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index 12aac3cc6b..f3908df642 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -19,13 +19,17 @@ allowed to be sent by which side. """ import logging -import simplejson +import platform +if platform.python_implementation() == "PyPy": + import json + _json_encoder = json.JSONEncoder() +else: + import simplejson as json + _json_encoder = json.JSONEncoder(namedtuple_as_object=False) logger = logging.getLogger(__name__) -_json_encoder = simplejson.JSONEncoder(namedtuple_as_object=False) - class Command(object): """The base command class. @@ -102,7 +106,7 @@ class RdataCommand(Command): return cls( stream_name, None if token == "batch" else int(token), - simplejson.loads(row_json) + json.loads(row_json) ) def to_line(self): @@ -300,7 +304,7 @@ class InvalidateCacheCommand(Command): def from_line(cls, line): cache_func, keys_json = line.split(" ", 1) - return cls(cache_func, simplejson.loads(keys_json)) + return cls(cache_func, json.loads(keys_json)) def to_line(self): return " ".join(( @@ -329,7 +333,7 @@ class UserIpCommand(Command): def from_line(cls, line): user_id, jsn = line.split(" ", 1) - access_token, ip, user_agent, device_id, last_seen = simplejson.loads(jsn) + access_token, ip, user_agent, device_id, last_seen = json.loads(jsn) return cls( user_id, access_token, ip, user_agent, device_id, last_seen diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 34df5be4e9..88ca5184cd 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -23,7 +23,8 @@ from synapse.util.msisdn import phone_number_to_msisdn from .base import ClientV1RestServlet, client_path_patterns -import simplejson as json +from canonicaljson import json + import urllib from six.moves.urllib import parse as urlparse diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 0b984987ed..e6ae5db79b 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -31,7 +31,7 @@ from synapse.http.servlet import ( from six.moves.urllib import parse as urlparse import logging -import simplejson as json +from canonicaljson import json logger = logging.getLogger(__name__) diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index a291cffbf1..d2aa47b326 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -33,7 +33,7 @@ from ._base import set_timeline_upper_limit import itertools import logging -import simplejson as json +from canonicaljson import json logger = logging.getLogger(__name__) diff --git a/synapse/rest/media/v0/content_repository.py b/synapse/rest/media/v0/content_repository.py index 956bd5da75..e44d4276d2 100644 --- a/synapse/rest/media/v0/content_repository.py +++ b/synapse/rest/media/v0/content_repository.py @@ -22,8 +22,9 @@ from synapse.api.errors import ( from twisted.protocols.basic import FileSender from twisted.web import server, resource +from canonicaljson import json + import base64 -import simplejson as json import logging import os import re diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 565cef2b8d..adca490640 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -23,7 +23,8 @@ import re import shutil import sys import traceback -import simplejson as json + +from 
canonicaljson import json from six.moves import urllib_parse as urlparse from six import string_types diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 284ec3c970..7034a61399 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -22,8 +22,9 @@ from synapse.storage.util.id_generators import StreamIdGenerator from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.caches.descriptors import cached, cachedInlineCallbacks +from canonicaljson import json + import abc -import simplejson as json import logging logger = logging.getLogger(__name__) diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index 12ea8a158c..4d32d0bdf6 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -15,8 +15,8 @@ # limitations under the License. import logging import re -import simplejson as json from twisted.internet import defer +from canonicaljson import json from synapse.appservice import AppServiceTransaction from synapse.config.appservice import load_appservices diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index b7e9c716c8..af18964510 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -18,7 +18,8 @@ from . import engines from twisted.internet import defer -import simplejson as json +from canonicaljson import json + import logging logger = logging.getLogger(__name__) diff --git a/synapse/storage/deviceinbox.py b/synapse/storage/deviceinbox.py index a879e5bfc1..38addbf9c0 100644 --- a/synapse/storage/deviceinbox.py +++ b/synapse/storage/deviceinbox.py @@ -14,7 +14,8 @@ # limitations under the License. import logging -import simplejson + +from canonicaljson import json from twisted.internet import defer @@ -85,7 +86,7 @@ class DeviceInboxStore(BackgroundUpdateStore): ) rows = [] for destination, edu in remote_messages_by_destination.items(): - edu_json = simplejson.dumps(edu) + edu_json = json.dumps(edu) rows.append((destination, stream_id, now_ms, edu_json)) txn.executemany(sql, rows) @@ -177,7 +178,7 @@ class DeviceInboxStore(BackgroundUpdateStore): " WHERE user_id = ?" ) txn.execute(sql, (user_id,)) - message_json = simplejson.dumps(messages_by_device["*"]) + message_json = json.dumps(messages_by_device["*"]) for row in txn: # Add the message for all devices for this user on this # server. 
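
Each of the storage hunks in this commit makes the same substitution: take json from canonicaljson, which re-exports whichever implementation the changelog entry describes (simplejson on CPython, the stdlib module on PyPy). A minimal sketch of why the call sites need no other change, assuming only that re-export:

    from canonicaljson import encode_canonical_json, json

    event_content = {"b": 2, "a": 1}
    # Canonical encoding sorts keys deterministically, as needed for signing.
    wire = encode_canonical_json(event_content)
    # loads/dumps go through whichever backend canonicaljson selected.
    assert json.loads(wire) == event_content
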
@@ -199,7 +200,7 @@ class DeviceInboxStore(BackgroundUpdateStore): # Only insert into the local inbox if the device exists on # this server device = row[0] - message_json = simplejson.dumps(messages_by_device[device]) + message_json = json.dumps(messages_by_device[device]) messages_json_for_user[device] = message_json if messages_json_for_user: @@ -253,7 +254,7 @@ class DeviceInboxStore(BackgroundUpdateStore): messages = [] for row in txn: stream_pos = row[0] - messages.append(simplejson.loads(row[1])) + messages.append(json.loads(row[1])) if len(messages) < limit: stream_pos = current_stream_id return (messages, stream_pos) @@ -389,7 +390,7 @@ class DeviceInboxStore(BackgroundUpdateStore): messages = [] for row in txn: stream_pos = row[0] - messages.append(simplejson.loads(row[1])) + messages.append(json.loads(row[1])) if len(messages) < limit: stream_pos = current_stream_id return (messages, stream_pos) diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index d149d8392e..2ed9ada783 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -import simplejson as json from twisted.internet import defer @@ -21,6 +20,8 @@ from synapse.api.errors import StoreError from ._base import SQLBaseStore, Cache from synapse.util.caches.descriptors import cached, cachedList, cachedInlineCallbacks +from canonicaljson import json + from six import itervalues, iteritems logger = logging.getLogger(__name__) diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py index b146487943..181047c8b7 100644 --- a/synapse/storage/end_to_end_keys.py +++ b/synapse/storage/end_to_end_keys.py @@ -16,8 +16,7 @@ from twisted.internet import defer from synapse.util.caches.descriptors import cached -from canonicaljson import encode_canonical_json -import simplejson as json +from canonicaljson import encode_canonical_json, json from ._base import SQLBaseStore diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 8cb24b7d59..05cb3f61ce 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -19,7 +19,8 @@ from twisted.internet import defer from synapse.util.caches.descriptors import cachedInlineCallbacks import logging -import simplejson as json + +from canonicaljson import json from six import iteritems diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 7d0e59538a..d816d4883c 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -19,7 +19,8 @@ from functools import wraps import itertools import logging -import simplejson as json +from canonicaljson import json + from twisted.internet import defer from synapse.storage.events_worker import EventsWorkerStore diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index f6a6e46b43..896225aab9 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -29,7 +29,8 @@ from synapse.api.errors import SynapseError from collections import namedtuple import logging -import simplejson as json + +from canonicaljson import json # these are only included to make the type annotations work from synapse.events import EventBase # noqa: F401 diff --git a/synapse/storage/filtering.py b/synapse/storage/filtering.py index 2e2763126d..eae6027cee 100644 --- a/synapse/storage/filtering.py +++ b/synapse/storage/filtering.py @@ -19,8 
+19,7 @@ from ._base import SQLBaseStore from synapse.api.errors import SynapseError, Codes from synapse.util.caches.descriptors import cachedInlineCallbacks -from canonicaljson import encode_canonical_json -import simplejson as json +from canonicaljson import encode_canonical_json, json class FilteringStore(SQLBaseStore): diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index da05ccb027..b77402d295 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -20,7 +20,7 @@ from synapse.api.errors import SynapseError from ._base import SQLBaseStore -import simplejson as json +from canonicaljson import json # The category ID for the "default" category. We don't store as null in the diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 04a0b59a39..9e52e992b3 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -25,9 +25,10 @@ from synapse.push.baserules import list_with_base_rules from synapse.api.constants import EventTypes from twisted.internet import defer +from canonicaljson import json + import abc import logging -import simplejson as json logger = logging.getLogger(__name__) diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index 307660b99a..c6def861cf 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -17,12 +17,11 @@ from ._base import SQLBaseStore from twisted.internet import defer -from canonicaljson import encode_canonical_json +from canonicaljson import encode_canonical_json, json from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList import logging -import simplejson as json import types logger = logging.getLogger(__name__) diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index c93c228f6e..f230a3bab7 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -21,9 +21,10 @@ from synapse.util.caches.stream_change_cache import StreamChangeCache from twisted.internet import defer +from canonicaljson import json + import abc import logging -import simplejson as json logger = logging.getLogger(__name__) diff --git a/synapse/storage/room.py b/synapse/storage/room.py index ea6a189185..ca0eb187e5 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -20,9 +20,10 @@ from synapse.storage._base import SQLBaseStore from synapse.storage.search import SearchStore from synapse.util.caches.descriptors import cached, cachedInlineCallbacks +from canonicaljson import json + import collections import logging -import simplejson as json import re logger = logging.getLogger(__name__) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 829cc4a207..8fc9549a75 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -28,7 +28,7 @@ from synapse.api.constants import Membership, EventTypes from synapse.types import get_domain_from_id import logging -import simplejson as json +from canonicaljson import json from six import itervalues, iteritems diff --git a/synapse/storage/search.py b/synapse/storage/search.py index f0fa5d7631..9b77c45318 100644 --- a/synapse/storage/search.py +++ b/synapse/storage/search.py @@ -16,7 +16,7 @@ from collections import namedtuple import logging import re -import simplejson as json +from canonicaljson import json from six import string_types diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index 6671d3cfca..04d123ed95 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ 
-19,7 +19,8 @@ from synapse.storage.account_data import AccountDataWorkerStore from synapse.util.caches.descriptors import cached from twisted.internet import defer -import simplejson as json +from canonicaljson import json + import logging from six.moves import range diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index e485d19b84..acbc03446e 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -19,12 +19,11 @@ from synapse.util.caches.descriptors import cached from twisted.internet import defer import six -from canonicaljson import encode_canonical_json +from canonicaljson import encode_canonical_json, json from collections import namedtuple import logging -import simplejson as json # py2 sqlite has buffer hardcoded as only binary type, so we must use it, # despite being deprecated and removed in favor of memoryview diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py index 15f0a7ba9e..535e7d0e7a 100644 --- a/synapse/util/frozenutils.py +++ b/synapse/util/frozenutils.py @@ -14,7 +14,7 @@ # limitations under the License. from frozendict import frozendict -import simplejson as json +from canonicaljson import json from six import string_types -- cgit 1.5.1 From 49af4020190eae6b0c65897d96cd2be286364d2b Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 9 Jul 2018 16:09:20 +1000 Subject: run isort --- synapse/api/auth.py | 7 +-- synapse/api/errors.py | 4 +- synapse/api/filtering.py | 14 ++--- synapse/api/urls.py | 2 +- synapse/app/__init__.py | 4 +- synapse/app/_base.py | 11 ++-- synapse/app/appservice.py | 5 +- synapse/app/client_reader.py | 5 +- synapse/app/event_creator.py | 9 ++-- synapse/app/federation_reader.py | 5 +- synapse/app/federation_sender.py | 5 +- synapse/app/frontend_proxy.py | 9 ++-- synapse/app/homeserver.py | 33 +++++++----- synapse/app/media_repository.py | 9 ++-- synapse/app/pusher.py | 5 +- synapse/app/synchrotron.py | 11 ++-- synapse/app/synctl.py | 5 +- synapse/app/user_dir.py | 5 +- synapse/appservice/__init__.py | 12 ++--- synapse/appservice/api.py | 14 ++--- synapse/appservice/scheduler.py | 4 +- synapse/config/_base.py | 3 +- synapse/config/api.py | 4 +- synapse/config/appservice.py | 14 ++--- synapse/config/homeserver.py | 36 ++++++------- synapse/config/jwt.py | 1 - synapse/config/key.py | 21 ++++---- synapse/config/logger.py | 6 ++- synapse/config/password_auth_providers.py | 4 +- synapse/config/registration.py | 4 +- synapse/config/repository.py | 2 +- synapse/config/server.py | 1 + synapse/config/server_notices_config.py | 3 +- synapse/config/tls.py | 11 ++-- synapse/crypto/context_factory.py | 6 +-- synapse/crypto/event_signing.py | 10 ++-- synapse/crypto/keyclient.py | 14 ++--- synapse/crypto/keyring.py | 44 ++++++++-------- synapse/event_auth.py | 6 +-- synapse/events/__init__.py | 3 +- synapse/events/builder.py | 5 +- synapse/events/snapshot.py | 4 +- synapse/events/utils.py | 9 ++-- synapse/events/validator.py | 8 +-- synapse/federation/federation_base.py | 7 +-- synapse/federation/federation_client.py | 14 ++--- synapse/federation/federation_server.py | 18 +++---- synapse/federation/persistence.py | 5 +- synapse/federation/send_queue.py | 14 ++--- synapse/federation/transaction_queue.py | 30 +++++------ synapse/federation/transport/client.py | 9 ++-- synapse/federation/transport/server.py | 21 ++++---- synapse/federation/units.py | 3 +- synapse/groups/attestations.py | 5 +- synapse/groups/groups_server.py | 7 +-- synapse/handlers/__init__.py | 10 ++-- 
synapse/handlers/_base.py | 3 +-
synapse/handlers/admin.py | 4 +-
synapse/handlers/appservice.py | 13 +++--
synapse/handlers/auth.py | 26 +++++-----
synapse/handlers/deactivate_account.py | 7 +--
synapse/handlers/device.py | 16 +++---
synapse/handlers/devicemessage.py | 3 +-
synapse/handlers/directory.py | 9 ++--
synapse/handlers/e2e_keys.py | 10 ++--
synapse/handlers/events.py | 15 +++---
synapse/handlers/federation.py | 40 ++++++++-------
synapse/handlers/groups_local.py | 7 +--
synapse/handlers/identity.py | 7 ++-
synapse/handlers/initial_sync.py | 9 ++--
synapse/handlers/message.py | 23 ++++-----
synapse/handlers/presence.py | 21 ++++----
synapse/handlers/profile.py | 3 +-
synapse/handlers/read_marker.py | 5 +-
synapse/handlers/receipts.py | 10 ++--
synapse/handlers/register.py | 11 ++--
synapse/handlers/room.py | 19 +++----
synapse/handlers/room_list.py | 20 ++++----
synapse/handlers/room_member.py | 14 +++--
synapse/handlers/room_member_worker.py | 5 +-
synapse/handlers/search.py | 17 +++----
synapse/handlers/set_password.py | 1 +
synapse/handlers/sync.py | 24 ++++-----
synapse/handlers/typing.py | 11 ++--
synapse/handlers/user_directory.py | 7 +--
synapse/http/additional_resource.py | 3 +-
synapse/http/client.py | 48 +++++++++---------
synapse/http/endpoint.py | 12 ++---
synapse/http/matrixfederationclient.py | 48 +++++++++---------
synapse/http/request_metrics.py | 2 +-
synapse/http/server.py | 38 +++++++-------
synapse/http/servlet.py | 3 +-
synapse/http/site.py | 2 +-
synapse/metrics/__init__.py | 11 ++--
synapse/notifier.py | 21 ++++----
synapse/push/action_generator.py | 6 +--
synapse/push/baserules.py | 3 +-
synapse/push/bulk_push_rule_evaluator.py | 17 ++++---
synapse/push/clientformat.py | 6 +--
synapse/push/emailpusher.py | 7 ++-
synapse/push/httppusher.py | 6 +--
synapse/push/mailer.py | 33 ++++++------
synapse/push/presentable_names.py | 6 +--
synapse/push/push_rule_evaluator.py | 4 +-
synapse/push/push_tools.py | 5 +-
synapse/push/pusher.py | 3 +-
synapse/replication/http/__init__.py | 1 -
synapse/replication/http/membership.py | 4 +-
synapse/replication/http/send_event.py | 12 +++-
synapse/replication/slave/storage/_base.py | 4 +-
synapse/replication/slave/storage/appservice.py | 3 +-
synapse/replication/slave/storage/client_ips.py | 3 +-
synapse/replication/slave/storage/deviceinbox.py | 7 +--
synapse/replication/slave/storage/devices.py | 5 +-
synapse/replication/slave/storage/directory.py | 3 +-
synapse/replication/slave/storage/events.py | 3 +-
synapse/replication/slave/storage/filtering.py | 3 +-
synapse/replication/slave/storage/groups.py | 5 +-
synapse/replication/slave/storage/keys.py | 3 +-
synapse/replication/slave/storage/presence.py | 8 +--
synapse/replication/slave/storage/push_rule.py | 5 +-
synapse/replication/slave/storage/pushers.py | 4 +-
synapse/replication/slave/storage/receipts.py | 4 +-
synapse/replication/slave/storage/registration.py | 3 +-
synapse/replication/slave/storage/room.py | 3 +-
synapse/replication/slave/storage/transactions.py | 3 +-
synapse/replication/tcp/client.py | 9 ++--
synapse/replication/tcp/protocol.py | 40 +++++++++------
synapse/replication/tcp/resource.py | 17 ++++---
synapse/replication/tcp/streams.py | 5 +-
synapse/rest/__init__.py | 55 ++++++++------------
synapse/rest/client/v1/admin.py | 10 ++--
synapse/rest/client/v1/base.py | 10 ++--
synapse/rest/client/v1/directory.py | 9 ++--
synapse/rest/client/v1/events.py | 8 +--
synapse/rest/client/v1/initial_sync.py | 1 +
synapse/rest/client/v1/login.py | 30 +++++------
synapse/rest/client/v1/logout.py | 5 +-
synapse/rest/client/v1/presence.py | 13 ++---
synapse/rest/client/v1/profile.py | 5 +-
synapse/rest/client/v1/push_rule.py | 16 +++---
synapse/rest/client/v1/pusher.py | 15 +++---
synapse/rest/client/v1/register.py | 19 +++----
synapse/rest/client/v1/room.py | 24 +++++----
synapse/rest/client/v1/voip.py | 9 ++--
synapse/rest/client/v2_alpha/account.py | 5 +-
synapse/rest/client/v2_alpha/account_data.py | 10 ++--
synapse/rest/client/v2_alpha/auth.py | 5 +-
synapse/rest/client/v2_alpha/devices.py | 1 +
synapse/rest/client/v2_alpha/filter.py | 10 ++--
synapse/rest/client/v2_alpha/groups.py | 4 +-
synapse/rest/client/v2_alpha/keys.py | 7 ++-
synapse/rest/client/v2_alpha/notifications.py | 11 ++--
synapse/rest/client/v2_alpha/openid.py | 10 ++--
synapse/rest/client/v2_alpha/read_marker.py | 6 +--
synapse/rest/client/v2_alpha/receipts.py | 6 +--
synapse/rest/client/v2_alpha/register.py | 22 ++++----
synapse/rest/client/v2_alpha/report_event.py | 6 +--
synapse/rest/client/v2_alpha/sync.py | 27 +++++-----
synapse/rest/client/v2_alpha/tags.py | 10 ++--
synapse/rest/client/v2_alpha/thirdparty.py | 1 +
synapse/rest/client/v2_alpha/user_directory.py | 1 +
synapse/rest/client/versions.py | 4 +-
synapse/rest/consent/consent_resource.py | 12 ++---
synapse/rest/key/v1/server_key_resource.py | 10 ++--
synapse/rest/key/v2/__init__.py | 1 +
synapse/rest/key/v2/local_key_resource.py | 10 ++--
synapse/rest/key/v2/remote_key_resource.py | 16 +++---
synapse/rest/media/v0/content_repository.py | 19 +++---
synapse/rest/media/v1/_base.py | 19 +++---
synapse/rest/media/v1/download_resource.py | 6 +--
synapse/rest/media/v1/filepath.py | 2 +-
synapse/rest/media/v1/identicon_resource.py | 1 +
synapse/rest/media/v1/media_repository.py | 51 ++++++++---------
synapse/rest/media/v1/media_storage.py | 17 +++----
synapse/rest/media/v1/preview_url_resource.py | 25 ++++-----
synapse/rest/media/v1/storage_provider.py | 11 ++--
synapse/rest/media/v1/thumbnail_resource.py | 11 ++--
synapse/rest/media/v1/thumbnailer.py | 4 +-
synapse/rest/media/v1/upload_resource.py | 5 +-
synapse/server.py | 34 ++++++-------
synapse/server_notices/consent_server_notices.py | 3 +-
synapse/state.py | 22 ++++----
synapse/storage/__init__.py | 59 ++++++++------------
synapse/storage/_base.py | 19 ++++---
synapse/storage/account_data.py | 13 +++--
synapse/storage/appservice.py | 6 ++-
synapse/storage/background_updates.py | 10 ++--
synapse/storage/client_ips.py | 9 ++--
synapse/storage/deviceinbox.py | 3 +-
synapse/storage/devices.py | 11 ++--
synapse/storage/directory.py | 9 ++--
synapse/storage/end_to_end_keys.py | 8 +--
synapse/storage/engines/__init__.py | 7 ++-
synapse/storage/engines/sqlite3.py | 4 +-
synapse/storage/event_federation.py | 16 +++---
synapse/storage/event_push_actions.py | 11 ++--
synapse/storage/events.py | 34 ++++++-------
synapse/storage/events_worker.py | 25 +++++----
synapse/storage/filtering.py | 7 +--
synapse/storage/group_server.py | 5 +-
synapse/storage/keys.py | 14 ++---
synapse/storage/prepare_database.py | 1 -
synapse/storage/presence.py | 10 ++--
synapse/storage/profile.py | 2 +-
synapse/storage/push_rule.py | 18 ++++---
synapse/storage/pusher.py | 9 ++--
synapse/storage/receipts.py | 16 +++---
synapse/storage/registration.py | 6 +--
synapse/storage/rejections.py | 4 +-
synapse/storage/room.py | 12 ++---
synapse/storage/roommember.py | 19 ++++---
synapse/storage/schema/delta/25/fts.py | 6 +--
synapse/storage/schema/delta/27/ts.py | 4 +-
synapse/storage/schema/delta/30/as_users.py | 2 +-
synapse/storage/schema/delta/31/search_update.py | 7 +--
synapse/storage/schema/delta/33/event_fields.py | 5 +-
synapse/storage/schema/delta/33/remote_media_ts.py | 1 -
synapse/storage/schema/delta/34/cache_stream.py | 6 +--
.../storage/schema/delta/34/received_txn_purge.py | 4 +-
synapse/storage/schema/delta/34/sent_txn_purge.py | 4 +-
synapse/storage/schema/delta/37/remove_auth_idx.py | 6 +--
synapse/storage/schema/delta/42/user_dir.py | 2 +-
synapse/storage/search.py | 8 +--
synapse/storage/signatures.py | 8 +--
synapse/storage/state.py | 5 +-
synapse/storage/stream.py | 16 +++---
synapse/storage/tags.py | 10 ++--
synapse/storage/transactions.py | 11 ++--
synapse/storage/user_directory.py | 14 ++---
synapse/storage/user_erasure_store.py | 2 +-
synapse/storage/util/id_generators.py | 2 +-
synapse/streams/config.py | 5 +-
synapse/streams/events.py | 7 ++-
synapse/types.py | 3 +-
synapse/util/__init__.py | 1 +
synapse/util/async.py | 18 ++++---
synapse/util/caches/__init__.py | 6 +--
synapse/util/caches/descriptors.py | 22 ++++----
synapse/util/caches/dictionary_cache.py | 9 ++--
synapse/util/caches/expiringcache.py | 5 +-
synapse/util/caches/lrucache.py | 2 +-
synapse/util/caches/stream_change_cache.py | 5 +-
synapse/util/file_consumer.py | 4 +-
synapse/util/frozenutils.py | 6 +--
synapse/util/httpresourcetree.py | 4 +-
synapse/util/logcontext.py | 6 +--
synapse/util/logformatter.py | 3 +-
synapse/util/logutils.py | 8 ++-
synapse/util/manhole.py | 6 +--
synapse/util/metrics.py | 8 +--
synapse/util/msisdn.py | 1 +
synapse/util/ratelimitutils.py | 13 +++--
synapse/util/retryutils.py | 9 ++--
synapse/util/rlimit.py | 3 +-
synapse/util/stringutils.py | 1 +
synapse/util/versionstring.py | 4 +-
synapse/visibility.py | 4 +-
tests/__init__.py | 1 +
tests/api/test_auth.py | 7 ++-
tests/api/test_filtering.py | 15 +++---
tests/appservice/test_appservice.py | 9 ++--
tests/appservice/test_scheduler.py | 15 ++++--
tests/config/test_generate.py | 1 +
tests/config/test_load.py | 3 ++
tests/crypto/test_event_signing.py | 10 ++--
tests/crypto/test_keyring.py | 10 ++--
tests/events/test_utils.py | 4 +-
tests/federation/test_federation_server.py | 1 +
tests/handlers/test_appservice.py | 8 +--
tests/handlers/test_auth.py | 2 +
tests/handlers/test_device.py | 2 +-
tests/handlers/test_directory.py | 6 +--
tests/handlers/test_e2e_keys.py | 5 +-
tests/handlers/test_presence.py | 12 +++--
tests/handlers/test_profile.py | 6 +--
tests/handlers/test_register.py | 5 +-
tests/handlers/test_typing.py | 19 ++++---
tests/http/test_endpoint.py | 6 +--
tests/replication/slave/storage/_base.py | 15 +++---
.../replication/slave/storage/test_account_data.py | 4 +-
tests/replication/slave/storage/test_events.py | 5 +-
tests/replication/slave/storage/test_receipts.py | 4 +-
tests/rest/client/test_transactions.py | 7 +--
tests/rest/client/v1/test_events.py | 6 +--
tests/rest/client/v1/test_profile.py | 5 +-
tests/rest/client/v1/test_register.py | 10 ++--
tests/rest/client/v1/test_rooms.py | 11 ++--
tests/rest/client/v1/test_typing.py | 7 ++-
tests/rest/client/v1/utils.py | 10 ++--
tests/rest/client/v2_alpha/__init__.py | 7 ++-
tests/rest/client/v2_alpha/test_filter.py | 11 ++--
tests/rest/client/v2_alpha/test_register.py | 11 ++--
tests/rest/media/v1/test_media_storage.py | 14 ++--
tests/server.py | 10 ++--
tests/storage/test__base.py | 8 +--
tests/storage/test_appservice.py | 20 +++++---
tests/storage/test_background_update.py | 6 +--
tests/storage/test_base.py | 8 +--
tests/storage/test_devices.py | 1 + tests/storage/test_directory.py | 4 +- tests/storage/test_event_push_actions.py | 3 +- tests/storage/test_keys.py | 1 + tests/storage/test_presence.py | 4 +- tests/storage/test_profile.py | 2 +- tests/storage/test_redaction.py | 8 +-- tests/storage/test_registration.py | 2 +- tests/storage/test_room.py | 4 +- tests/storage/test_roommember.py | 8 +-- tests/storage/test_user_directory.py | 1 + tests/test_distributor.py | 7 +-- tests/test_dns.py | 7 +-- tests/test_event_auth.py | 3 +- tests/test_federation.py | 10 ++-- tests/test_preview.py | 7 +-- tests/test_server.py | 3 +- tests/test_state.py | 10 ++-- tests/test_test_utils.py | 1 - tests/test_types.py | 6 +-- tests/util/caches/test_descriptors.py | 7 ++- tests/util/test_dict_cache.py | 4 +- tests/util/test_expiring_cache.py | 4 +- tests/util/test_file_consumer.py | 9 ++-- tests/util/test_limiter.py | 4 +- tests/util/test_linearizer.py | 7 +-- tests/util/test_logcontext.py | 8 +-- tests/util/test_logformatter.py | 1 + tests/util/test_lrucache.py | 4 +- tests/util/test_rwlock.py | 4 +- tests/util/test_snapshot_cache.py | 5 +- tests/util/test_stream_change_cache.py | 3 +- tests/util/test_treecache.py | 4 +- tests/util/test_wheel_timer.py | 4 +- tests/utils.py | 3 +- 334 files changed, 1609 insertions(+), 1528 deletions(-) (limited to 'synapse/util') diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 088b4e8b6d..6dec862fec 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -18,15 +18,16 @@ import logging from six import itervalues import pymacaroons -from twisted.internet import defer from netaddr import IPAddress +from twisted.internet import defer + import synapse.types from synapse import event_auth -from synapse.api.constants import EventTypes, Membership, JoinRules +from synapse.api.constants import EventTypes, JoinRules, Membership from synapse.api.errors import AuthError, Codes from synapse.types import UserID -from synapse.util.caches import register_cache, CACHE_SIZE_FACTOR +from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache from synapse.util.caches.lrucache import LruCache from synapse.util.metrics import Measure diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 227a0713b2..6074df292f 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -17,11 +17,11 @@ import logging -from canonicaljson import json - from six import iteritems from six.moves import http_client +from canonicaljson import json + logger = logging.getLogger(__name__) diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index aae25e7a47..25346baa87 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -12,16 +12,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.api.errors import SynapseError -from synapse.storage.presence import UserPresenceState -from synapse.types import UserID, RoomID -from twisted.internet import defer - -from canonicaljson import json - import jsonschema +from canonicaljson import json from jsonschema import FormatChecker +from twisted.internet import defer + +from synapse.api.errors import SynapseError +from synapse.storage.presence import UserPresenceState +from synapse.types import RoomID, UserID + FILTER_SCHEMA = { "additionalProperties": False, "type": "object", diff --git a/synapse/api/urls.py b/synapse/api/urls.py index bb46b5da8a..71347912f1 100644 --- a/synapse/api/urls.py +++ b/synapse/api/urls.py @@ -15,8 +15,8 @@ # limitations under the License. """Contains the URL paths to prefix various aspects of the server with. """ -from hashlib import sha256 import hmac +from hashlib import sha256 from six.moves.urllib.parse import urlencode diff --git a/synapse/app/__init__.py b/synapse/app/__init__.py index 9c2b627590..3b6b9368b8 100644 --- a/synapse/app/__init__.py +++ b/synapse/app/__init__.py @@ -14,9 +14,11 @@ # limitations under the License. import sys + +from synapse import python_dependencies # noqa: E402 + sys.dont_write_bytecode = True -from synapse import python_dependencies # noqa: E402 try: python_dependencies.check_requirements() diff --git a/synapse/app/_base.py b/synapse/app/_base.py index a6925ab139..391bd14c5c 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -17,15 +17,18 @@ import gc import logging import sys +from daemonize import Daemonize + +from twisted.internet import error, reactor + +from synapse.util import PreserveLoggingContext +from synapse.util.rlimit import change_resource_limit + try: import affinity except Exception: affinity = None -from daemonize import Daemonize -from synapse.util import PreserveLoggingContext -from synapse.util.rlimit import change_resource_limit -from twisted.internet import error, reactor logger = logging.getLogger(__name__) diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index 4319ddce03..9a37384fb7 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -16,6 +16,9 @@ import logging import sys +from twisted.internet import defer, reactor +from twisted.web.resource import NoResource + import synapse from synapse import events from synapse.app import _base @@ -36,8 +39,6 @@ from synapse.util.httpresourcetree import create_resource_tree from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string -from twisted.internet import reactor, defer -from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.appservice") diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 654ddb8414..b0ea26dcb4 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -16,6 +16,9 @@ import logging import sys +from twisted.internet import reactor +from twisted.web.resource import NoResource + import synapse from synapse import events from synapse.app import _base @@ -44,8 +47,6 @@ from synapse.util.httpresourcetree import create_resource_tree from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string -from twisted.internet import reactor -from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.client_reader") diff --git 
a/synapse/app/event_creator.py b/synapse/app/event_creator.py index 441467093a..374f115644 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -16,6 +16,9 @@ import logging import sys +from twisted.internet import reactor +from twisted.web.resource import NoResource + import synapse from synapse import events from synapse.app import _base @@ -43,8 +46,10 @@ from synapse.replication.slave.storage.room import RoomStore from synapse.replication.slave.storage.transactions import TransactionStore from synapse.replication.tcp.client import ReplicationClientHandler from synapse.rest.client.v1.room import ( - RoomSendEventRestServlet, RoomMembershipRestServlet, RoomStateEventRestServlet, JoinRoomAliasServlet, + RoomMembershipRestServlet, + RoomSendEventRestServlet, + RoomStateEventRestServlet, ) from synapse.server import HomeServer from synapse.storage.engines import create_engine @@ -52,8 +57,6 @@ from synapse.util.httpresourcetree import create_resource_tree from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string -from twisted.internet import reactor -from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.event_creator") diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index b2415cc671..7af00b8bcf 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -16,6 +16,9 @@ import logging import sys +from twisted.internet import reactor +from twisted.web.resource import NoResource + import synapse from synapse import events from synapse.api.urls import FEDERATION_PREFIX @@ -41,8 +44,6 @@ from synapse.util.httpresourcetree import create_resource_tree from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string -from twisted.internet import reactor -from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.federation_reader") diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index 13d2b70053..18469013fa 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -16,6 +16,9 @@ import logging import sys +from twisted.internet import defer, reactor +from twisted.web.resource import NoResource + import synapse from synapse import events from synapse.app import _base @@ -42,8 +45,6 @@ from synapse.util.httpresourcetree import create_resource_tree from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string -from twisted.internet import defer, reactor -from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.federation_sender") diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index d2bae4ad03..b5f78f4640 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -16,6 +16,9 @@ import logging import sys +from twisted.internet import defer, reactor +from twisted.web.resource import NoResource + import synapse from synapse import events from synapse.api.errors import SynapseError @@ -25,9 +28,7 @@ from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.crypto import context_factory from synapse.http.server import JsonResource -from synapse.http.servlet import ( - RestServlet, 
parse_json_object_from_request, -) +from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseSite from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource @@ -44,8 +45,6 @@ from synapse.util.httpresourcetree import create_resource_tree from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string -from twisted.internet import defer, reactor -from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.frontend_proxy") diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index ae5fc751d5..14e6dca522 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -18,27 +18,39 @@ import logging import os import sys +from twisted.application import service +from twisted.internet import defer, reactor +from twisted.web.resource import EncodingResourceWrapper, NoResource +from twisted.web.server import GzipEncoderFactory +from twisted.web.static import File + import synapse import synapse.config.logger from synapse import events -from synapse.api.urls import CONTENT_REPO_PREFIX, FEDERATION_PREFIX, \ - LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, SERVER_KEY_PREFIX, SERVER_KEY_V2_PREFIX, \ - STATIC_PREFIX, WEB_CLIENT_PREFIX +from synapse.api.urls import ( + CONTENT_REPO_PREFIX, + FEDERATION_PREFIX, + LEGACY_MEDIA_PREFIX, + MEDIA_PREFIX, + SERVER_KEY_PREFIX, + SERVER_KEY_V2_PREFIX, + STATIC_PREFIX, + WEB_CLIENT_PREFIX, +) from synapse.app import _base -from synapse.app._base import quit_with_error, listen_ssl, listen_tcp +from synapse.app._base import listen_ssl, listen_tcp, quit_with_error from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.crypto import context_factory from synapse.federation.transport.server import TransportLayerServer -from synapse.module_api import ModuleApi from synapse.http.additional_resource import AdditionalResource from synapse.http.server import RootRedirect from synapse.http.site import SynapseSite from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource -from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, \ - check_requirements -from synapse.replication.http import ReplicationRestResource, REPLICATION_PREFIX +from synapse.module_api import ModuleApi +from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, check_requirements +from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory from synapse.rest import ClientRestResource from synapse.rest.key.v1.server_key_resource import LocalKey @@ -55,11 +67,6 @@ from synapse.util.manhole import manhole from synapse.util.module_loader import load_module from synapse.util.rlimit import change_resource_limit from synapse.util.versionstring import get_version_string -from twisted.application import service -from twisted.internet import defer, reactor -from twisted.web.resource import EncodingResourceWrapper, NoResource -from twisted.web.server import GzipEncoderFactory -from twisted.web.static import File logger = logging.getLogger("synapse.app.homeserver") diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index 19a682cce3..749bbf37d0 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -16,11 +16,12 
@@ import logging import sys +from twisted.internet import reactor +from twisted.web.resource import NoResource + import synapse from synapse import events -from synapse.api.urls import ( - CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX -) +from synapse.api.urls import CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX from synapse.app import _base from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig @@ -43,8 +44,6 @@ from synapse.util.httpresourcetree import create_resource_tree from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string -from twisted.internet import reactor -from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.media_repository") diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 13cfbd08b0..9295a51d5b 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -16,6 +16,9 @@ import logging import sys +from twisted.internet import defer, reactor +from twisted.web.resource import NoResource + import synapse from synapse import events from synapse.app import _base @@ -37,8 +40,6 @@ from synapse.util.httpresourcetree import create_resource_tree from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string -from twisted.internet import defer, reactor -from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.pusher") diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 82f06ea185..26b9ec85f2 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -17,6 +17,11 @@ import contextlib import logging import sys +from six import iteritems + +from twisted.internet import defer, reactor +from twisted.web.resource import NoResource + import synapse from synapse.api.constants import EventTypes from synapse.app import _base @@ -36,12 +41,12 @@ from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore from synapse.replication.slave.storage.devices import SlavedDeviceStore from synapse.replication.slave.storage.events import SlavedEventStore from synapse.replication.slave.storage.filtering import SlavedFilteringStore +from synapse.replication.slave.storage.groups import SlavedGroupServerStore from synapse.replication.slave.storage.presence import SlavedPresenceStore from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore from synapse.replication.slave.storage.receipts import SlavedReceiptsStore from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.replication.slave.storage.room import RoomStore -from synapse.replication.slave.storage.groups import SlavedGroupServerStore from synapse.replication.tcp.client import ReplicationClientHandler from synapse.rest.client.v1 import events from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet @@ -56,10 +61,6 @@ from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.stringutils import random_string from synapse.util.versionstring import get_version_string -from twisted.internet import defer, reactor -from twisted.web.resource import NoResource - -from six import iteritems logger = logging.getLogger("synapse.app.synchrotron") diff --git a/synapse/app/synctl.py b/synapse/app/synctl.py index 56ae086128..68acc15a9a 
100755 --- a/synapse/app/synctl.py +++ b/synapse/app/synctl.py @@ -16,16 +16,17 @@ import argparse import collections +import errno import glob import os import os.path import signal import subprocess import sys -import yaml -import errno import time +import yaml + SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"] GREEN = "\x1b[1;32m" diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index f5726e3df6..637a89530a 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -17,6 +17,9 @@ import logging import sys +from twisted.internet import defer, reactor +from twisted.web.resource import NoResource + import synapse from synapse import events from synapse.app import _base @@ -43,8 +46,6 @@ from synapse.util.httpresourcetree import create_resource_tree from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string -from twisted.internet import reactor, defer -from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.user_dir") diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index 328cbfa284..57ed8a3ca2 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -12,17 +12,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from synapse.api.constants import EventTypes -from synapse.util.caches.descriptors import cachedInlineCallbacks -from synapse.types import GroupID, get_domain_from_id - -from twisted.internet import defer - import logging import re from six import string_types +from twisted.internet import defer + +from synapse.api.constants import EventTypes +from synapse.types import GroupID, get_domain_from_id +from synapse.util.caches.descriptors import cachedInlineCallbacks + logger = logging.getLogger(__name__) diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 47251fb6ad..6980e5890e 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -12,19 +12,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import logging +import urllib + +from prometheus_client import Counter + from twisted.internet import defer from synapse.api.constants import ThirdPartyEntityKind from synapse.api.errors import CodeMessageException -from synapse.http.client import SimpleHttpClient from synapse.events.utils import serialize_event -from synapse.util.caches.response_cache import ResponseCache +from synapse.http.client import SimpleHttpClient from synapse.types import ThirdPartyInstanceID - -import logging -import urllib - -from prometheus_client import Counter +from synapse.util.caches.response_cache import ResponseCache logger = logging.getLogger(__name__) diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 6eddbc0828..2430814796 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -48,14 +48,14 @@ UP & quit +---------- YES SUCCESS This is all tied together by the AppServiceScheduler which DIs the required components. 
""" +import logging + from twisted.internet import defer from synapse.appservice import ApplicationServiceState from synapse.util.logcontext import run_in_background from synapse.util.metrics import Measure -import logging - logger = logging.getLogger(__name__) diff --git a/synapse/config/_base.py b/synapse/config/_base.py index b748ed2b0a..3d2e90dd5b 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -16,11 +16,12 @@ import argparse import errno import os -import yaml from textwrap import dedent from six import integer_types +import yaml + class ConfigError(Exception): pass diff --git a/synapse/config/api.py b/synapse/config/api.py index 20ba33226a..403d96ba76 100644 --- a/synapse/config/api.py +++ b/synapse/config/api.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ._base import Config - from synapse.api.constants import EventTypes +from ._base import Config + class ApiConfig(Config): diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py index 0c27bb2fa7..3b161d708a 100644 --- a/synapse/config/appservice.py +++ b/synapse/config/appservice.py @@ -12,18 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ._base import Config, ConfigError +import logging -from synapse.appservice import ApplicationService -from synapse.types import UserID +from six import string_types +from six.moves.urllib import parse as urlparse +import yaml from netaddr import IPSet -import yaml -import logging +from synapse.appservice import ApplicationService +from synapse.types import UserID -from six import string_types -from six.moves.urllib import parse as urlparse +from ._base import Config, ConfigError logger = logging.getLogger(__name__) diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index 1dea2ad024..2fd9c48abf 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -13,32 +13,32 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from .tls import TlsConfig -from .server import ServerConfig -from .logger import LoggingConfig -from .database import DatabaseConfig -from .ratelimiting import RatelimitConfig -from .repository import ContentRepositoryConfig -from .captcha import CaptchaConfig -from .voip import VoipConfig -from .registration import RegistrationConfig -from .metrics import MetricsConfig from .api import ApiConfig from .appservice import AppServiceConfig -from .key import KeyConfig -from .saml2 import SAML2Config +from .captcha import CaptchaConfig from .cas import CasConfig -from .password import PasswordConfig +from .consent_config import ConsentConfig +from .database import DatabaseConfig +from .emailconfig import EmailConfig +from .groups import GroupsConfig from .jwt import JWTConfig +from .key import KeyConfig +from .logger import LoggingConfig +from .metrics import MetricsConfig +from .password import PasswordConfig from .password_auth_providers import PasswordAuthProviderConfig -from .emailconfig import EmailConfig -from .workers import WorkerConfig from .push import PushConfig +from .ratelimiting import RatelimitConfig +from .registration import RegistrationConfig +from .repository import ContentRepositoryConfig +from .saml2 import SAML2Config +from .server import ServerConfig +from .server_notices_config import ServerNoticesConfig from .spam_checker import SpamCheckerConfig -from .groups import GroupsConfig +from .tls import TlsConfig from .user_directory import UserDirectoryConfig -from .consent_config import ConsentConfig -from .server_notices_config import ServerNoticesConfig +from .voip import VoipConfig +from .workers import WorkerConfig class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig, diff --git a/synapse/config/jwt.py b/synapse/config/jwt.py index 47f145c589..51e7f7e003 100644 --- a/synapse/config/jwt.py +++ b/synapse/config/jwt.py @@ -15,7 +15,6 @@ from ._base import Config, ConfigError - MISSING_JWT = ( """Missing jwt library. This is required for jwt login. diff --git a/synapse/config/key.py b/synapse/config/key.py index d1382ad9ac..279c47bb48 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -13,21 +13,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from ._base import Config, ConfigError +import hashlib +import logging +import os -from synapse.util.stringutils import random_string from signedjson.key import ( - generate_signing_key, is_signing_algorithm_supported, - decode_signing_key_base64, decode_verify_key_bytes, - read_signing_keys, write_signing_keys, NACL_ED25519 + NACL_ED25519, + decode_signing_key_base64, + decode_verify_key_bytes, + generate_signing_key, + is_signing_algorithm_supported, + read_signing_keys, + write_signing_keys, ) from unpaddedbase64 import decode_base64 -from synapse.util.stringutils import random_string_with_symbols -import os -import hashlib -import logging +from synapse.util.stringutils import random_string, random_string_with_symbols +from ._base import Config, ConfigError logger = logging.getLogger(__name__) diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 557c270fbe..a87b11a1df 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -16,15 +16,17 @@ import logging import logging.config import os import signal -from string import Template import sys +from string import Template -from twisted.logger import STDLibLogObserver, globalLogBeginner import yaml +from twisted.logger import STDLibLogObserver, globalLogBeginner + import synapse from synapse.util.logcontext import LoggingContextFilter from synapse.util.versionstring import get_version_string + from ._base import Config DEFAULT_LOG_CONFIG = Template(""" diff --git a/synapse/config/password_auth_providers.py b/synapse/config/password_auth_providers.py index 6602c5b4c7..f4066abc28 100644 --- a/synapse/config/password_auth_providers.py +++ b/synapse/config/password_auth_providers.py @@ -13,10 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ._base import Config - from synapse.util.module_loader import load_module +from ._base import Config + LDAP_PROVIDER = 'ldap_auth_provider.LdapAuthProvider' diff --git a/synapse/config/registration.py b/synapse/config/registration.py index c5384b3ad4..0fb964eb67 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -13,11 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ._base import Config +from distutils.util import strtobool from synapse.util.stringutils import random_string_with_symbols -from distutils.util import strtobool +from ._base import Config class RegistrationConfig(Config): diff --git a/synapse/config/repository.py b/synapse/config/repository.py index 81ecf9778c..fc909c1fac 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -13,11 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ._base import Config, ConfigError from collections import namedtuple from synapse.util.module_loader import load_module +from ._base import Config, ConfigError MISSING_NETADDR = ( "Missing netaddr library. This is required for URL preview API." 
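A note on the pattern these hunks repeat. The isort run imposes one uniform layout on every file it touches, visible in the config hunks above and below: standard-library imports first, then six/compat shims, then other third-party packages, then Twisted, then first-party synapse modules, and finally relative imports such as "from ._base import Config", with a blank line between groups. A schematic sketch of the resulting layout (illustrative only: every name below appears somewhere in this patch's hunks, but this exact module does not, and it only imports cleanly with synapse's dependencies installed):

    # Illustrative import grouping after the isort run; names are drawn
    # from the surrounding hunks, this file itself is not part of the patch.
    import hashlib    # stdlib
    import logging

    from six import string_types    # compat shims sort ahead of the rest

    from canonicaljson import encode_canonical_json, json    # third-party

    from twisted.internet import defer    # Twisted gets a group of its own

    from synapse.api.errors import SynapseError    # first-party last
    from synapse.util.stringutils import random_string

The configuration driving this ordering (section names, known first-party packages) is presumably kept in the project's isort settings, which are outside this excerpt, so the grouping above is inferred from the hunks rather than quoted from any config file.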
diff --git a/synapse/config/server.py b/synapse/config/server.py index 71fd51e4bc..18102656b0 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -17,6 +17,7 @@ import logging from synapse.http.endpoint import parse_and_validate_server_name + from ._base import Config, ConfigError logger = logging.Logger(__name__) diff --git a/synapse/config/server_notices_config.py b/synapse/config/server_notices_config.py index be1d1f762c..3c39850ac6 100644 --- a/synapse/config/server_notices_config.py +++ b/synapse/config/server_notices_config.py @@ -12,9 +12,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from ._base import Config from synapse.types import UserID +from ._base import Config + DEFAULT_CONFIG = """\ # Server Notices room configuration # diff --git a/synapse/config/tls.py b/synapse/config/tls.py index b66154bc7c..fef1ea99cb 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -13,15 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ._base import Config - -from OpenSSL import crypto -import subprocess import os - +import subprocess from hashlib import sha256 + from unpaddedbase64 import encode_base64 +from OpenSSL import crypto + +from ._base import Config + GENERATE_DH_PARAMS = False diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index 0397f73ab4..a1e1d0d33a 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -12,12 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import ssl +import logging + from OpenSSL import SSL, crypto +from twisted.internet import ssl from twisted.internet._sslverify import _defaultCurveName -import logging - logger = logging.getLogger(__name__) diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py index aaa3efaca3..8774b28967 100644 --- a/synapse/crypto/event_signing.py +++ b/synapse/crypto/event_signing.py @@ -15,15 +15,15 @@ # limitations under the License. -from synapse.api.errors import SynapseError, Codes -from synapse.events.utils import prune_event +import hashlib +import logging from canonicaljson import encode_canonical_json -from unpaddedbase64 import encode_base64, decode_base64 from signedjson.sign import sign_json +from unpaddedbase64 import decode_base64, encode_base64 -import hashlib -import logging +from synapse.api.errors import Codes, SynapseError +from synapse.events.utils import prune_event logger = logging.getLogger(__name__) diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py index 2a0eddbea1..668b4f517d 100644 --- a/synapse/crypto/keyclient.py +++ b/synapse/crypto/keyclient.py @@ -13,14 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.util import logcontext -from twisted.web.http import HTTPClient -from twisted.internet.protocol import Factory -from twisted.internet import defer, reactor -from synapse.http.endpoint import matrix_federation_endpoint -from canonicaljson import json import logging +from canonicaljson import json + +from twisted.internet import defer, reactor +from twisted.internet.protocol import Factory +from twisted.web.http import HTTPClient + +from synapse.http.endpoint import matrix_federation_endpoint +from synapse.util import logcontext logger = logging.getLogger(__name__) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 9b17ef0a08..e95b9fb43e 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -14,35 +14,37 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.crypto.keyclient import fetch_server_key -from synapse.api.errors import SynapseError, Codes -from synapse.util import unwrapFirstError, logcontext -from synapse.util.logcontext import ( - PreserveLoggingContext, - preserve_fn, - run_in_background, -) -from synapse.util.metrics import Measure - -from twisted.internet import defer +import hashlib +import logging +import urllib +from collections import namedtuple -from signedjson.sign import ( - verify_signed_json, signature_ids, sign_json, encode_canonical_json, - SignatureVerifyException, -) from signedjson.key import ( - is_signing_algorithm_supported, decode_verify_key_bytes, + decode_verify_key_bytes, encode_verify_key_base64, + is_signing_algorithm_supported, +) +from signedjson.sign import ( + SignatureVerifyException, + encode_canonical_json, + sign_json, + signature_ids, + verify_signed_json, ) from unpaddedbase64 import decode_base64, encode_base64 from OpenSSL import crypto +from twisted.internet import defer -from collections import namedtuple -import urllib -import hashlib -import logging - +from synapse.api.errors import Codes, SynapseError +from synapse.crypto.keyclient import fetch_server_key +from synapse.util import logcontext, unwrapFirstError +from synapse.util.logcontext import ( + PreserveLoggingContext, + preserve_fn, + run_in_background, +) +from synapse.util.metrics import Measure logger = logging.getLogger(__name__) diff --git a/synapse/event_auth.py b/synapse/event_auth.py index cdf99fd140..b32f64e729 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -17,11 +17,11 @@ import logging from canonicaljson import encode_canonical_json from signedjson.key import decode_verify_key_bytes -from signedjson.sign import verify_signed_json, SignatureVerifyException +from signedjson.sign import SignatureVerifyException, verify_signed_json from unpaddedbase64 import decode_base64 -from synapse.api.constants import EventTypes, Membership, JoinRules -from synapse.api.errors import AuthError, SynapseError, EventSizeError +from synapse.api.constants import EventTypes, JoinRules, Membership +from synapse.api.errors import AuthError, EventSizeError, SynapseError from synapse.types import UserID, get_domain_from_id logger = logging.getLogger(__name__) diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index cb08da4984..51f9084b90 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -13,9 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.util.frozenutils import freeze from synapse.util.caches import intern_dict - +from synapse.util.frozenutils import freeze # Whether we should use frozen_dict in FrozenEvent. Using frozen_dicts prevents # bugs where we accidentally share e.g. signature dicts. However, converting diff --git a/synapse/events/builder.py b/synapse/events/builder.py index 13fbba68c0..e662eaef10 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -13,13 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -from . import EventBase, FrozenEvent, _event_dict_property +import copy from synapse.types import EventID - from synapse.util.stringutils import random_string -import copy +from . import EventBase, FrozenEvent, _event_dict_property class EventBuilder(EventBase): diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index 8e684d91b5..bcd9bb5946 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -13,10 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer - from frozendict import frozendict +from twisted.internet import defer + class EventContext(object): """ diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 29ae086786..652941ca0d 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -13,14 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.api.constants import EventTypes -from . import EventBase +import re + +from six import string_types from frozendict import frozendict -import re +from synapse.api.constants import EventTypes -from six import string_types +from . import EventBase # Split strings on "." but not "\." This uses a negative lookbehind assertion for '\' # (? 
""" +import logging + +from six import string_types + from twisted.internet import defer -from synapse.api.errors import SynapseError, AuthError -from synapse.types import UserID +from synapse.api.errors import AuthError, SynapseError from synapse.handlers.presence import format_user_presence_state from synapse.http.servlet import parse_json_object_from_request -from .base import ClientV1RestServlet, client_path_patterns - -from six import string_types +from synapse.types import UserID -import logging +from .base import ClientV1RestServlet, client_path_patterns logger = logging.getLogger(__name__) diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index e4e3611a14..a23edd8fe5 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -16,9 +16,10 @@ """ This module contains REST servlets to do with profile: /profile/ """ from twisted.internet import defer -from .base import ClientV1RestServlet, client_path_patterns -from synapse.types import UserID from synapse.http.servlet import parse_json_object_from_request +from synapse.types import UserID + +from .base import ClientV1RestServlet, client_path_patterns class ProfileDisplaynameRestServlet(ClientV1RestServlet): diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 6bb4821ec6..0df7ce570f 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -16,16 +16,18 @@ from twisted.internet import defer from synapse.api.errors import ( - SynapseError, UnrecognizedRequestError, NotFoundError, StoreError + NotFoundError, + StoreError, + SynapseError, + UnrecognizedRequestError, ) -from .base import ClientV1RestServlet, client_path_patterns -from synapse.storage.push_rule import ( - InconsistentRuleException, RuleNotFoundException -) -from synapse.push.clientformat import format_push_rules_for_user +from synapse.http.servlet import parse_json_value_from_request from synapse.push.baserules import BASE_RULE_IDS +from synapse.push.clientformat import format_push_rules_for_user from synapse.push.rulekinds import PRIORITY_CLASS_MAP -from synapse.http.servlet import parse_json_value_from_request +from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException + +from .base import ClientV1RestServlet, client_path_patterns class PushRuleRestServlet(ClientV1RestServlet): diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index 40e523cc5f..1581f88db5 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -13,20 +13,21 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import logging + from twisted.internet import defer -from synapse.api.errors import SynapseError, Codes -from synapse.push import PusherConfigException +from synapse.api.errors import Codes, StoreError, SynapseError +from synapse.http.server import finish_request from synapse.http.servlet import ( - parse_json_object_from_request, parse_string, RestServlet + RestServlet, + parse_json_object_from_request, + parse_string, ) -from synapse.http.server import finish_request -from synapse.api.errors import StoreError +from synapse.push import PusherConfigException from .base import ClientV1RestServlet, client_path_patterns -import logging - logger = logging.getLogger(__name__) diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py index c10320dedf..3ce5f8b726 100644 --- a/synapse/rest/client/v1/register.py +++ b/synapse/rest/client/v1/register.py @@ -14,21 +14,22 @@ # limitations under the License. """This module contains REST servlets to do with registration: /register""" +import hmac +import logging +from hashlib import sha1 + +from six import string_types + from twisted.internet import defer -from synapse.api.errors import SynapseError, Codes -from synapse.api.constants import LoginType -from synapse.api.auth import get_access_token_from_request -from .base import ClientV1RestServlet, client_path_patterns import synapse.util.stringutils as stringutils +from synapse.api.auth import get_access_token_from_request +from synapse.api.constants import LoginType +from synapse.api.errors import Codes, SynapseError from synapse.http.servlet import parse_json_object_from_request from synapse.types import create_requester -from hashlib import sha1 -import hmac -import logging - -from six import string_types +from .base import ClientV1RestServlet, client_path_patterns logger = logging.getLogger(__name__) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index e6ae5db79b..2470db52ba 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -15,23 +15,27 @@ # limitations under the License. 
""" This module contains REST servlets to do with rooms: /rooms/ """ +import logging + +from six.moves.urllib import parse as urlparse + +from canonicaljson import json + from twisted.internet import defer -from .base import ClientV1RestServlet, client_path_patterns -from synapse.api.errors import SynapseError, Codes, AuthError -from synapse.streams.config import PaginationConfig from synapse.api.constants import EventTypes, Membership +from synapse.api.errors import AuthError, Codes, SynapseError from synapse.api.filtering import Filter -from synapse.types import UserID, RoomID, RoomAlias, ThirdPartyInstanceID -from synapse.events.utils import serialize_event, format_event_for_client_v2 +from synapse.events.utils import format_event_for_client_v2, serialize_event from synapse.http.servlet import ( - parse_json_object_from_request, parse_string, parse_integer + parse_integer, + parse_json_object_from_request, + parse_string, ) +from synapse.streams.config import PaginationConfig +from synapse.types import RoomAlias, RoomID, ThirdPartyInstanceID, UserID -from six.moves.urllib import parse as urlparse - -import logging -from canonicaljson import json +from .base import ClientV1RestServlet, client_path_patterns logger = logging.getLogger(__name__) diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py index c43b30b73a..62f4c3d93e 100644 --- a/synapse/rest/client/v1/voip.py +++ b/synapse/rest/client/v1/voip.py @@ -13,16 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. +import base64 +import hashlib +import hmac + from twisted.internet import defer from .base import ClientV1RestServlet, client_path_patterns -import hmac -import hashlib -import base64 - - class VoipRestServlet(ClientV1RestServlet): PATTERNS = client_path_patterns("/voip/turnServer$") diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 80dbc3c92e..528c1f43f9 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -17,17 +17,20 @@ import logging from six.moves import http_client + from twisted.internet import defer from synapse.api.auth import has_access_token from synapse.api.constants import LoginType from synapse.api.errors import Codes, SynapseError from synapse.http.servlet import ( - RestServlet, assert_params_in_request, + RestServlet, + assert_params_in_request, parse_json_object_from_request, ) from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.threepids import check_3pid_allowed + from ._base import client_v2_patterns, interactive_auth_handler logger = logging.getLogger(__name__) diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py index 0e0a187efd..371e9aa354 100644 --- a/synapse/rest/client/v2_alpha/account_data.py +++ b/synapse/rest/client/v2_alpha/account_data.py @@ -13,14 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from ._base import client_v2_patterns - -from synapse.http.servlet import RestServlet, parse_json_object_from_request -from synapse.api.errors import AuthError, SynapseError +import logging from twisted.internet import defer -import logging +from synapse.api.errors import AuthError, SynapseError +from synapse.http.servlet import RestServlet, parse_json_object_from_request + +from ._base import client_v2_patterns logger = logging.getLogger(__name__) diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index d6f3a19648..bd8b5f4afa 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging + from twisted.internet import defer from synapse.api.constants import LoginType @@ -23,9 +25,6 @@ from synapse.http.servlet import RestServlet from ._base import client_v2_patterns -import logging - - logger = logging.getLogger(__name__) RECAPTCHA_TEMPLATE = """ diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index 35d58b367a..09f6a8efe3 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -19,6 +19,7 @@ from twisted.internet import defer from synapse.api import errors from synapse.http import servlet + from ._base import client_v2_patterns, interactive_auth_handler logger = logging.getLogger(__name__) diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py index 1b9dc4528d..ae86728879 100644 --- a/synapse/rest/client/v2_alpha/filter.py +++ b/synapse/rest/client/v2_alpha/filter.py @@ -13,17 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging + from twisted.internet import defer -from synapse.api.errors import AuthError, SynapseError, StoreError, Codes +from synapse.api.errors import AuthError, Codes, StoreError, SynapseError from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.types import UserID -from ._base import client_v2_patterns -from ._base import set_timeline_upper_limit - -import logging - +from ._base import client_v2_patterns, set_timeline_upper_limit logger = logging.getLogger(__name__) diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 3bb1ec2af6..21e02c07c0 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import logging
+
 from twisted.internet import defer
 
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
@@ -21,8 +23,6 @@ from synapse.types import GroupID
 
 from ._base import client_v2_patterns
 
-import logging
-
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py
index 3cc87ea63f..8486086b51 100644
--- a/synapse/rest/client/v2_alpha/keys.py
+++ b/synapse/rest/client/v2_alpha/keys.py
@@ -19,10 +19,13 @@ from twisted.internet import defer
 
 from synapse.api.errors import SynapseError
 from synapse.http.servlet import (
-    RestServlet, parse_json_object_from_request, parse_integer
+    RestServlet,
+    parse_integer,
+    parse_json_object_from_request,
+    parse_string,
 )
-from synapse.http.servlet import parse_string
 from synapse.types import StreamToken
+
 from ._base import client_v2_patterns
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/v2_alpha/notifications.py
index 66583d6778..2a6ea3df5f 100644
--- a/synapse/rest/client/v2_alpha/notifications.py
+++ b/synapse/rest/client/v2_alpha/notifications.py
@@ -13,19 +13,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
 from twisted.internet import defer
 
-from synapse.http.servlet import (
-    RestServlet, parse_string, parse_integer
-)
 from synapse.events.utils import (
-    serialize_event, format_event_for_client_v2_without_room_id,
+    format_event_for_client_v2_without_room_id,
+    serialize_event,
 )
+from synapse.http.servlet import RestServlet, parse_integer, parse_string
 
 from ._base import client_v2_patterns
 
-import logging
-
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/client/v2_alpha/openid.py b/synapse/rest/client/v2_alpha/openid.py
index aa1cae8e1e..01c90aa2a3 100644
--- a/synapse/rest/client/v2_alpha/openid.py
+++ b/synapse/rest/client/v2_alpha/openid.py
@@ -14,15 +14,15 @@
 # limitations under the License.
 
-from ._base import client_v2_patterns
+import logging
+
+from twisted.internet import defer
 
-from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.api.errors import AuthError
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.util.stringutils import random_string
 
-from twisted.internet import defer
-
-import logging
+from ._base import client_v2_patterns
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/v2_alpha/read_marker.py
index 2f8784fe06..a6e582a5ae 100644
--- a/synapse/rest/client/v2_alpha/read_marker.py
+++ b/synapse/rest/client/v2_alpha/read_marker.py
@@ -13,13 +13,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
 from twisted.internet import defer
 
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 
-from ._base import client_v2_patterns
-
-import logging
+from ._base import client_v2_patterns
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py
index 1fbff2edd8..de370cac45 100644
--- a/synapse/rest/client/v2_alpha/receipts.py
+++ b/synapse/rest/client/v2_alpha/receipts.py
@@ -13,14 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
+
 from twisted.internet import defer
 
 from synapse.api.errors import SynapseError
 from synapse.http.servlet import RestServlet
 
-from ._base import client_v2_patterns
-
-import logging
+from ._base import client_v2_patterns
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index 97e7c0f7c6..896650d5a5 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -14,29 +14,31 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import hmac
+import logging
+from hashlib import sha1
+
+from six import string_types
+
 from twisted.internet import defer
 
 import synapse
 import synapse.types
 from synapse.api.auth import get_access_token_from_request, has_access_token
 from synapse.api.constants import LoginType
-from synapse.api.errors import SynapseError, Codes, UnrecognizedRequestError
+from synapse.api.errors import Codes, SynapseError, UnrecognizedRequestError
 from synapse.http.servlet import (
-    RestServlet, parse_json_object_from_request, assert_params_in_request, parse_string
+    RestServlet,
+    assert_params_in_request,
+    parse_json_object_from_request,
+    parse_string,
 )
 from synapse.util.msisdn import phone_number_to_msisdn
+from synapse.util.ratelimitutils import FederationRateLimiter
 from synapse.util.threepids import check_3pid_allowed
 
 from ._base import client_v2_patterns, interactive_auth_handler
 
-import logging
-import hmac
-from hashlib import sha1
-from synapse.util.ratelimitutils import FederationRateLimiter
-
-from six import string_types
-
-
 # We ought to be using hmac.compare_digest() but on older pythons it doesn't
 # exist. It's a _really minor_ security flaw to use plain string comparison
 # because the timing attack is so obscured by all the other code here it's
diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/v2_alpha/report_event.py
index 8903e12405..08bb8e04fd 100644
--- a/synapse/rest/client/v2_alpha/report_event.py
+++ b/synapse/rest/client/v2_alpha/report_event.py
@@ -13,13 +13,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
 from twisted.internet import defer
 
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 
-from ._base import client_v2_patterns
-
-import logging
+from ._base import client_v2_patterns
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py
index d2aa47b326..8aa06faf23 100644
--- a/synapse/rest/client/v2_alpha/sync.py
+++ b/synapse/rest/client/v2_alpha/sync.py
@@ -13,27 +13,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import itertools
+import logging
+
+from canonicaljson import json
+
 from twisted.internet import defer
 
-from synapse.http.servlet import (
-    RestServlet, parse_string, parse_integer, parse_boolean
+from synapse.api.constants import PresenceState
+from synapse.api.errors import SynapseError
+from synapse.api.filtering import DEFAULT_FILTER_COLLECTION, FilterCollection
+from synapse.events.utils import (
+    format_event_for_client_v2_without_room_id,
+    serialize_event,
 )
 from synapse.handlers.presence import format_user_presence_state
 from synapse.handlers.sync import SyncConfig
+from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string
 from synapse.types import StreamToken
-from synapse.events.utils import (
-    serialize_event, format_event_for_client_v2_without_room_id,
-)
-from synapse.api.filtering import FilterCollection, DEFAULT_FILTER_COLLECTION
-from synapse.api.errors import SynapseError
-from synapse.api.constants import PresenceState
 
-from ._base import client_v2_patterns
-from ._base import set_timeline_upper_limit
-import itertools
-import logging
-
-from canonicaljson import json
+from ._base import client_v2_patterns, set_timeline_upper_limit
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py
index dac8603b07..4fea614e95 100644
--- a/synapse/rest/client/v2_alpha/tags.py
+++ b/synapse/rest/client/v2_alpha/tags.py
@@ -13,14 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import client_v2_patterns
-
-from synapse.http.servlet import RestServlet, parse_json_object_from_request
-from synapse.api.errors import AuthError
+import logging
 
 from twisted.internet import defer
 
-import logging
+from synapse.api.errors import AuthError
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
+
+from ._base import client_v2_patterns
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py
index 6773b9ba60..d9d379182e 100644
--- a/synapse/rest/client/v2_alpha/thirdparty.py
+++ b/synapse/rest/client/v2_alpha/thirdparty.py
@@ -20,6 +20,7 @@ from twisted.internet import defer
 
 from synapse.api.constants import ThirdPartyEntityKind
 from synapse.http.servlet import RestServlet
+
 from ._base import client_v2_patterns
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py
index 2d4a43c353..cac0624ba7 100644
--- a/synapse/rest/client/v2_alpha/user_directory.py
+++ b/synapse/rest/client/v2_alpha/user_directory.py
@@ -19,6 +19,7 @@ from twisted.internet import defer
 
 from synapse.api.errors import SynapseError
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
+
 from ._base import client_v2_patterns
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 2ecb15deee..6ac2987b98 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -13,11 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from synapse.http.servlet import RestServlet
-
 import logging
 import re
 
+from synapse.http.servlet import RestServlet
+
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py
index 724911d1e6..147ff7d79b 100644
--- a/synapse/rest/consent/consent_resource.py
+++ b/synapse/rest/consent/consent_resource.py
@@ -13,28 +13,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from hashlib import sha256
 import hmac
 import logging
+from hashlib import sha256
 from os import path
+
 from six.moves import http_client
 
 import jinja2
 from jinja2 import TemplateNotFound
+
 from twisted.internet import defer
 from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
 
-from synapse.api.errors import NotFoundError, SynapseError, StoreError
+from synapse.api.errors import NotFoundError, StoreError, SynapseError
 from synapse.config import ConfigError
-from synapse.http.server import (
-    finish_request,
-    wrap_html_request_handler,
-)
+from synapse.http.server import finish_request, wrap_html_request_handler
 from synapse.http.servlet import parse_string
 from synapse.types import UserID
 
-
 # language to use for the templates. TODO: figure this out from Accept-Language
 TEMPLATE_LANGUAGE = "en"
diff --git a/synapse/rest/key/v1/server_key_resource.py b/synapse/rest/key/v1/server_key_resource.py
index 1498d188c1..b9ee6e1c13 100644
--- a/synapse/rest/key/v1/server_key_resource.py
+++ b/synapse/rest/key/v1/server_key_resource.py
@@ -14,14 +14,16 @@
 # limitations under the License.
 
-from twisted.web.resource import Resource
-from synapse.http.server import respond_with_json_bytes
+import logging
+
+from canonicaljson import encode_canonical_json
 from signedjson.sign import sign_json
 from unpaddedbase64 import encode_base64
-from canonicaljson import encode_canonical_json
+
 from OpenSSL import crypto
-import logging
+from twisted.web.resource import Resource
 
+from synapse.http.server import respond_with_json_bytes
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/key/v2/__init__.py b/synapse/rest/key/v2/__init__.py
index a07224148c..3491fd2118 100644
--- a/synapse/rest/key/v2/__init__.py
+++ b/synapse/rest/key/v2/__init__.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 from twisted.web.resource import Resource
+
 from .local_key_resource import LocalKey
 from .remote_key_resource import RemoteKey
diff --git a/synapse/rest/key/v2/local_key_resource.py b/synapse/rest/key/v2/local_key_resource.py
index 04775b3c45..ec0ec7b431 100644
--- a/synapse/rest/key/v2/local_key_resource.py
+++ b/synapse/rest/key/v2/local_key_resource.py
@@ -14,13 +14,15 @@
 # limitations under the License.
-from twisted.web.resource import Resource
-from synapse.http.server import respond_with_json_bytes
+import logging
+
+from canonicaljson import encode_canonical_json
 from signedjson.sign import sign_json
 from unpaddedbase64 import encode_base64
-from canonicaljson import encode_canonical_json
 
-import logging
+from twisted.web.resource import Resource
+
+from synapse.http.server import respond_with_json_bytes
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index 21b4c1175e..7d67e4b064 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -12,20 +12,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.http.server import (
-    respond_with_json_bytes, wrap_json_request_handler,
-)
-from synapse.http.servlet import parse_integer, parse_json_object_from_request
-from synapse.api.errors import SynapseError, Codes
-from synapse.crypto.keyring import KeyLookupError
+import logging
+from io import BytesIO
 
+from twisted.internet import defer
 from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
-from twisted.internet import defer
 
-from io import BytesIO
-import logging
+from synapse.api.errors import Codes, SynapseError
+from synapse.crypto.keyring import KeyLookupError
+from synapse.http.server import respond_with_json_bytes, wrap_json_request_handler
+from synapse.http.servlet import parse_integer, parse_json_object_from_request
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/media/v0/content_repository.py b/synapse/rest/media/v0/content_repository.py
index e44d4276d2..f255f2883f 100644
--- a/synapse/rest/media/v0/content_repository.py
+++ b/synapse/rest/media/v0/content_repository.py
@@ -13,22 +13,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.http.server import respond_with_json_bytes, finish_request
-
-from synapse.api.errors import (
-    Codes, cs_error
-)
-
-from twisted.protocols.basic import FileSender
-from twisted.web import server, resource
-
-from canonicaljson import json
-
 import base64
 import logging
 import os
 import re
 
+from canonicaljson import json
+
+from twisted.protocols.basic import FileSender
+from twisted.web import resource, server
+
+from synapse.api.errors import Codes, cs_error
+from synapse.http.server import finish_request, respond_with_json_bytes
+
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py
index c0d2f06855..65f4bd2910 100644
--- a/synapse/rest/media/v1/_base.py
+++ b/synapse/rest/media/v1/_base.py
@@ -13,23 +13,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from synapse.http.server import respond_with_json, finish_request
-from synapse.api.errors import (
-    cs_error, Codes, SynapseError
-)
-from synapse.util import logcontext
+import logging
+import os
+import urllib
+
+from six.moves.urllib import parse as urlparse
 
 from twisted.internet import defer
 from twisted.protocols.basic import FileSender
 
+from synapse.api.errors import Codes, SynapseError, cs_error
+from synapse.http.server import finish_request, respond_with_json
+from synapse.util import logcontext
 from synapse.util.stringutils import is_ascii
 
-import os
-
-import logging
-import urllib
-from six.moves.urllib import parse as urlparse
-
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py
index 8cf8820c31..fbfa85f74f 100644
--- a/synapse/rest/media/v1/download_resource.py
+++ b/synapse/rest/media/v1/download_resource.py
@@ -18,11 +18,9 @@ from twisted.internet import defer
 from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
 
-from synapse.http.server import (
-    set_cors_headers,
-    wrap_json_request_handler,
-)
 import synapse.http.servlet
+from synapse.http.server import set_cors_headers, wrap_json_request_handler
+
 from ._base import parse_media_id, respond_404
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/media/v1/filepath.py b/synapse/rest/media/v1/filepath.py
index d5164e47e0..c8586fa280 100644
--- a/synapse/rest/media/v1/filepath.py
+++ b/synapse/rest/media/v1/filepath.py
@@ -13,9 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import functools
 import os
 import re
-import functools
 
 NEW_FORMAT_ID_RE = re.compile(r"^\d\d\d\d-\d\d-\d\d")
diff --git a/synapse/rest/media/v1/identicon_resource.py b/synapse/rest/media/v1/identicon_resource.py
index 66f2b6bd30..a2e391415f 100644
--- a/synapse/rest/media/v1/identicon_resource.py
+++ b/synapse/rest/media/v1/identicon_resource.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 from pydenticon import Generator
+
 from twisted.web.resource import Resource
 
 FOREGROUND = [
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index 218ba7a083..30242c525a 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -14,41 +14,42 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from twisted.internet import defer, threads
+import cgi
+import errno
+import logging
+import os
+import shutil
+
+from six import iteritems
+from six.moves.urllib import parse as urlparse
+
 import twisted.internet.error
 import twisted.web.http
+from twisted.internet import defer, threads
 from twisted.web.resource import Resource
 
-from ._base import respond_404, FileInfo, respond_with_responder
-from .upload_resource import UploadResource
-from .download_resource import DownloadResource
-from .thumbnail_resource import ThumbnailResource
-from .identicon_resource import IdenticonResource
-from .preview_url_resource import PreviewUrlResource
-from .filepath import MediaFilePaths
-from .thumbnailer import Thumbnailer
-from .storage_provider import StorageProviderWrapper
-from .media_storage import MediaStorage
-
-from synapse.http.matrixfederationclient import MatrixFederationHttpClient
-from synapse.util.stringutils import random_string
 from synapse.api.errors import (
-    SynapseError, HttpResponseException, NotFoundError, FederationDeniedError,
+    FederationDeniedError,
+    HttpResponseException,
+    NotFoundError,
+    SynapseError,
 )
-
+from synapse.http.matrixfederationclient import MatrixFederationHttpClient
 from synapse.util.async import Linearizer
-from synapse.util.stringutils import is_ascii
 from synapse.util.logcontext import make_deferred_yieldable
 from synapse.util.retryutils import NotRetryingDestination
+from synapse.util.stringutils import is_ascii, random_string
 
-import os
-import errno
-import shutil
-
-import cgi
-import logging
-from six.moves.urllib import parse as urlparse
-from six import iteritems
+from ._base import FileInfo, respond_404, respond_with_responder
+from .download_resource import DownloadResource
+from .filepath import MediaFilePaths
+from .identicon_resource import IdenticonResource
+from .media_storage import MediaStorage
+from .preview_url_resource import PreviewUrlResource
+from .storage_provider import StorageProviderWrapper
+from .thumbnail_resource import ThumbnailResource
+from .thumbnailer import Thumbnailer
+from .upload_resource import UploadResource
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py
index d6b8ebbedb..b25993fcb5 100644
--- a/synapse/rest/media/v1/media_storage.py
+++ b/synapse/rest/media/v1/media_storage.py
@@ -13,22 +13,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from twisted.internet import defer, threads
-from twisted.protocols.basic import FileSender
+import contextlib
+import logging
+import os
+import shutil
+import sys
 
 import six
 
-from ._base import Responder
+from twisted.internet import defer, threads
+from twisted.protocols.basic import FileSender
 
 from synapse.util.file_consumer import BackgroundFileConsumer
 from synapse.util.logcontext import make_deferred_yieldable
 
-import contextlib
-import os
-import logging
-import shutil
-import sys
-
+from ._base import Responder
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index adca490640..4e3a18ce08 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -24,31 +24,28 @@ import shutil
 import sys
 import traceback
 
-from canonicaljson import json
-
-from six.moves import urllib_parse as urlparse
 from six import string_types
+from six.moves import urllib_parse as urlparse
+
+from canonicaljson import json
 
-from twisted.web.server import NOT_DONE_YET
 from twisted.internet import defer
 from twisted.web.resource import Resource
+from twisted.web.server import NOT_DONE_YET
 
-from ._base import FileInfo
-
-from synapse.api.errors import (
-    SynapseError, Codes,
-)
-from synapse.util.logcontext import make_deferred_yieldable, run_in_background
-from synapse.util.stringutils import random_string
-from synapse.util.caches.expiringcache import ExpiringCache
+from synapse.api.errors import Codes, SynapseError
 from synapse.http.client import SpiderHttpClient
 from synapse.http.server import (
-    respond_with_json_bytes,
     respond_with_json,
+    respond_with_json_bytes,
     wrap_json_request_handler,
 )
 from synapse.util.async import ObservableDeferred
-from synapse.util.stringutils import is_ascii
+from synapse.util.caches.expiringcache import ExpiringCache
+from synapse.util.logcontext import make_deferred_yieldable, run_in_background
+from synapse.util.stringutils import is_ascii, random_string
+
+from ._base import FileInfo
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py
index 0252afd9d3..7b9f8b4d79 100644
--- a/synapse/rest/media/v1/storage_provider.py
+++ b/synapse/rest/media/v1/storage_provider.py
@@ -13,17 +13,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from twisted.internet import defer, threads
+import logging
+import os
+import shutil
 
-from .media_storage import FileResponder
+from twisted.internet import defer, threads
 
 from synapse.config._base import Config
 from synapse.util.logcontext import run_in_background
 
-import logging
-import os
-import shutil
-
+from .media_storage import FileResponder
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py
index aae6e464e8..5305e9175f 100644
--- a/synapse/rest/media/v1/thumbnail_resource.py
+++ b/synapse/rest/media/v1/thumbnail_resource.py
@@ -20,13 +20,14 @@ from twisted.internet import defer
 from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
 
-from synapse.http.server import (
-    set_cors_headers,
-    wrap_json_request_handler,
-)
+from synapse.http.server import set_cors_headers, wrap_json_request_handler
 from synapse.http.servlet import parse_integer, parse_string
+
 from ._base import (
-    FileInfo, parse_media_id, respond_404, respond_with_file,
+    FileInfo,
+    parse_media_id,
+    respond_404,
+    respond_with_file,
     respond_with_responder,
 )
diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py
index e1ee535b9a..a4b26c2587 100644
--- a/synapse/rest/media/v1/thumbnailer.py
+++ b/synapse/rest/media/v1/thumbnailer.py
@@ -13,10 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import PIL.Image as Image
+import logging
 from io import BytesIO
 
-import logging
+import PIL.Image as Image
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py
index 7567476fce..1a98120e1d 100644
--- a/synapse/rest/media/v1/upload_resource.py
+++ b/synapse/rest/media/v1/upload_resource.py
@@ -20,10 +20,7 @@ from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
 
 from synapse.api.errors import SynapseError
-from synapse.http.server import (
-    respond_with_json,
-    wrap_json_request_handler,
-)
+from synapse.http.server import respond_with_json, wrap_json_request_handler
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/server.py b/synapse/server.py
index c29c19289a..92bea96c5c 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -33,19 +33,30 @@ from synapse.crypto.keyring import Keyring
 from synapse.events.builder import EventBuilderFactory
 from synapse.events.spamcheck import SpamChecker
 from synapse.federation.federation_client import FederationClient
-from synapse.federation.federation_server import FederationServer
+from synapse.federation.federation_server import (
+    FederationHandlerRegistry,
+    FederationServer,
+)
 from synapse.federation.send_queue import FederationRemoteSendQueue
-from synapse.federation.federation_server import FederationHandlerRegistry
-from synapse.federation.transport.client import TransportLayerClient
 from synapse.federation.transaction_queue import TransactionQueue
+from synapse.federation.transport.client import TransportLayerClient
+from synapse.groups.attestations import GroupAttestationSigning, GroupAttestionRenewer
+from synapse.groups.groups_server import GroupsServerHandler
 from synapse.handlers import Handlers
 from synapse.handlers.appservice import ApplicationServicesHandler
 from synapse.handlers.auth import AuthHandler, MacaroonGenerator
 from synapse.handlers.deactivate_account import DeactivateAccountHandler
-from synapse.handlers.devicemessage import DeviceMessageHandler
 from synapse.handlers.device import DeviceHandler
+from synapse.handlers.devicemessage import DeviceMessageHandler
 from synapse.handlers.e2e_keys import E2eKeysHandler
+from synapse.handlers.events import EventHandler, EventStreamHandler
+from synapse.handlers.groups_local import GroupsLocalHandler
+from synapse.handlers.initial_sync import InitialSyncHandler
+from synapse.handlers.message import EventCreationHandler
 from synapse.handlers.presence import PresenceHandler
+from synapse.handlers.profile import ProfileHandler
+from synapse.handlers.read_marker import ReadMarkerHandler
+from synapse.handlers.receipts import ReceiptsHandler
 from synapse.handlers.room import RoomCreationHandler
 from synapse.handlers.room_list import RoomListHandler
 from synapse.handlers.room_member import RoomMemberMasterHandler
@@ -53,17 +64,8 @@ from synapse.handlers.room_member_worker import RoomMemberWorkerHandler
 from synapse.handlers.set_password import SetPasswordHandler
 from synapse.handlers.sync import SyncHandler
 from synapse.handlers.typing import TypingHandler
-from synapse.handlers.events import EventHandler, EventStreamHandler
-from synapse.handlers.initial_sync import InitialSyncHandler
-from synapse.handlers.receipts import ReceiptsHandler
-from synapse.handlers.read_marker import ReadMarkerHandler
 from synapse.handlers.user_directory import UserDirectoryHandler
-from synapse.handlers.groups_local import GroupsLocalHandler
-from synapse.handlers.profile import ProfileHandler
-from synapse.handlers.message import EventCreationHandler
-from synapse.groups.groups_server import GroupsServerHandler
-from synapse.groups.attestations import GroupAttestionRenewer, GroupAttestationSigning
-from synapse.http.client import SimpleHttpClient, InsecureInterceptableContextFactory
+from synapse.http.client import InsecureInterceptableContextFactory, SimpleHttpClient
 from synapse.http.matrixfederationclient import MatrixFederationHttpClient
 from synapse.notifier import Notifier
 from synapse.push.action_generator import ActionGenerator
@@ -74,9 +76,7 @@ from synapse.rest.media.v1.media_repository import (
 )
 from synapse.server_notices.server_notices_manager import ServerNoticesManager
 from synapse.server_notices.server_notices_sender import ServerNoticesSender
-from synapse.server_notices.worker_server_notices_sender import (
-    WorkerServerNoticesSender,
-)
+from synapse.server_notices.worker_server_notices_sender import WorkerServerNoticesSender
 from synapse.state import StateHandler, StateResolutionHandler
 from synapse.storage import DataStore
 from synapse.streams.events import EventSources
diff --git a/synapse/server_notices/consent_server_notices.py b/synapse/server_notices/consent_server_notices.py
index bb74af1af5..5e3044d164 100644
--- a/synapse/server_notices/consent_server_notices.py
+++ b/synapse/server_notices/consent_server_notices.py
@@ -14,7 +14,8 @@
 # limitations under the License.
 import logging
 
-from six import (iteritems, string_types)
+from six import iteritems, string_types
+
 from twisted.internet import defer
 
 from synapse.api.errors import SynapseError
diff --git a/synapse/state.py b/synapse/state.py
index 8098db94b4..15a593d41c 100644
--- a/synapse/state.py
+++ b/synapse/state.py
@@ -14,25 +14,25 @@
 # limitations under the License.
+import hashlib
+import logging
+from collections import namedtuple
+
+from six import iteritems, itervalues
+
+from frozendict import frozendict
+
 from twisted.internet import defer
 
 from synapse import event_auth
-from synapse.util.logutils import log_function
-from synapse.util.caches.expiringcache import ExpiringCache
-from synapse.util.metrics import Measure
 from synapse.api.constants import EventTypes
 from synapse.api.errors import AuthError
 from synapse.events.snapshot import EventContext
 from synapse.util.async import Linearizer
 from synapse.util.caches import CACHE_SIZE_FACTOR
-
-from collections import namedtuple
-from frozendict import frozendict
-
-import logging
-import hashlib
-
-from six import iteritems, itervalues
+from synapse.util.caches.expiringcache import ExpiringCache
+from synapse.util.logutils import log_function
+from synapse.util.metrics import Measure
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index e843b702b9..ba88a54979 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -15,51 +15,48 @@
 # limitations under the License.
 
 import datetime
-from dateutil import tz
-import time
 import logging
+import time
+
+from dateutil import tz
+
+from synapse.api.constants import PresenceState
 from synapse.storage.devices import DeviceStore
 from synapse.storage.user_erasure_store import UserErasureStore
-from .appservice import (
-    ApplicationServiceStore, ApplicationServiceTransactionStore
-)
+from synapse.util.caches.stream_change_cache import StreamChangeCache
+
+from .account_data import AccountDataStore
+from .appservice import ApplicationServiceStore, ApplicationServiceTransactionStore
+from .client_ips import ClientIpStore
+from .deviceinbox import DeviceInboxStore
 from .directory import DirectoryStore
+from .end_to_end_keys import EndToEndKeyStore
+from .engines import PostgresEngine
+from .event_federation import EventFederationStore
+from .event_push_actions import EventPushActionsStore
 from .events import EventsStore
+from .filtering import FilteringStore
+from .group_server import GroupServerStore
+from .keys import KeyStore
+from .media_repository import MediaRepositoryStore
+from .openid import OpenIdStore
 from .presence import PresenceStore, UserPresenceState
 from .profile import ProfileStore
+from .push_rule import PushRuleStore
+from .pusher import PusherStore
+from .receipts import ReceiptsStore
 from .registration import RegistrationStore
+from .rejections import RejectionsStore
 from .room import RoomStore
 from .roommember import RoomMemberStore
-from .stream import StreamStore
-from .transactions import TransactionStore
-from .keys import KeyStore
-from .event_federation import EventFederationStore
-from .pusher import PusherStore
-from .push_rule import PushRuleStore
-from .media_repository import MediaRepositoryStore
-from .rejections import RejectionsStore
-from .event_push_actions import EventPushActionsStore
-from .deviceinbox import DeviceInboxStore
-from .group_server import GroupServerStore
-from .state import StateStore
-from .signatures import SignatureStore
-from .filtering import FilteringStore
-from .end_to_end_keys import EndToEndKeyStore
-
-from .receipts import ReceiptsStore
 from .search import SearchStore
+from .signatures import SignatureStore
+from .state import StateStore
+from .stream import StreamStore
 from .tags import TagsStore
-from .account_data import AccountDataStore
-from .openid import OpenIdStore
-from .client_ips import ClientIpStore
+from .transactions import TransactionStore
 from .user_directory import UserDirectoryStore
-
-from .util.id_generators import IdGenerator, StreamIdGenerator, ChainedIdGenerator
-from .engines import PostgresEngine
-
-from synapse.api.constants import PresenceState
-from synapse.util.caches.stream_change_cache import StreamChangeCache
+from .util.id_generators import ChainedIdGenerator, IdGenerator, StreamIdGenerator
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 22d6257a9f..1fd5d8f162 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -13,22 +13,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
+import sys
+import threading
+import time
 
-from synapse.api.errors import StoreError
-from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
-from synapse.util.caches.descriptors import Cache
-from synapse.storage.engines import PostgresEngine
+from six import iteritems, iterkeys, itervalues
+from six.moves import intern, range
 
 from prometheus_client import Histogram
 
 from twisted.internet import defer
 
-import sys
-import time
-import threading
-
-from six import itervalues, iterkeys, iteritems
-from six.moves import intern, range
+from synapse.api.errors import StoreError
+from synapse.storage.engines import PostgresEngine
+from synapse.util.caches.descriptors import Cache
+from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py
index 7034a61399..bbc3355c73 100644
--- a/synapse/storage/account_data.py
+++ b/synapse/storage/account_data.py
@@ -14,18 +14,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import abc
+import logging
+
+from canonicaljson import json
+
 from twisted.internet import defer
 
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.util.id_generators import StreamIdGenerator
-
-from synapse.util.caches.stream_change_cache import StreamChangeCache
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
-
-from canonicaljson import json
-
-import abc
-import logging
+from synapse.util.caches.stream_change_cache import StreamChangeCache
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py
index 4d32d0bdf6..9f12b360bc 100644
--- a/synapse/storage/appservice.py
+++ b/synapse/storage/appservice.py
@@ -15,14 +15,16 @@
 # limitations under the License.
 import logging
 import re
-from twisted.internet import defer
+
 from canonicaljson import json
 
+from twisted.internet import defer
+
 from synapse.appservice import AppServiceTransaction
 from synapse.config.appservice import load_appservices
 from synapse.storage.events import EventsWorkerStore
-from ._base import SQLBaseStore
 
+from ._base import SQLBaseStore
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index af18964510..dc9eca7d15 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -13,14 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import SQLBaseStore
-from . import engines
-
-from twisted.internet import defer
+import logging
 
 from canonicaljson import json
 
-import logging
+from twisted.internet import defer
+
+from . import engines
+from ._base import SQLBaseStore
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py
index 968d2fed22..b78eda3413 100644
--- a/synapse/storage/client_ips.py
+++ b/synapse/storage/client_ips.py
@@ -15,15 +15,14 @@
 
 import logging
 
-from twisted.internet import defer
+from six import iteritems
 
-from ._base import Cache
-from . import background_updates
+from twisted.internet import defer
 
 from synapse.util.caches import CACHE_SIZE_FACTOR
 
-from six import iteritems
-
+from . import background_updates
+from ._base import Cache
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/deviceinbox.py b/synapse/storage/deviceinbox.py
index 38addbf9c0..73646da025 100644
--- a/synapse/storage/deviceinbox.py
+++ b/synapse/storage/deviceinbox.py
@@ -19,10 +19,9 @@ from canonicaljson import json
 
 from twisted.internet import defer
 
-from .background_updates import BackgroundUpdateStore
-
 from synapse.util.caches.expiringcache import ExpiringCache
 
+from .background_updates import BackgroundUpdateStore
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py
index 2ed9ada783..ec68e39f1e 100644
--- a/synapse/storage/devices.py
+++ b/synapse/storage/devices.py
@@ -14,15 +14,16 @@
 # limitations under the License.
 import logging
 
+from six import iteritems, itervalues
+
+from canonicaljson import json
+
 from twisted.internet import defer
 
 from synapse.api.errors import StoreError
-from ._base import SQLBaseStore, Cache
-from synapse.util.caches.descriptors import cached, cachedList, cachedInlineCallbacks
-
-from canonicaljson import json
+from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList
 
-from six import itervalues, iteritems
+from ._base import Cache, SQLBaseStore
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py
index d0c0059757..808194236a 100644
--- a/synapse/storage/directory.py
+++ b/synapse/storage/directory.py
@@ -13,15 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import SQLBaseStore
-from synapse.util.caches.descriptors import cached
-
-from synapse.api.errors import SynapseError
+from collections import namedtuple
 
 from twisted.internet import defer
 
-from collections import namedtuple
+from synapse.api.errors import SynapseError
+from synapse.util.caches.descriptors import cached
+
+from ._base import SQLBaseStore
 
 RoomAliasMapping = namedtuple(
     "RoomAliasMapping",
diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py
index 181047c8b7..7ae5c65482 100644
--- a/synapse/storage/end_to_end_keys.py
+++ b/synapse/storage/end_to_end_keys.py
@@ -12,16 +12,16 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from six import iteritems
+
+from canonicaljson import encode_canonical_json, json
+
 from twisted.internet import defer
 
 from synapse.util.caches.descriptors import cached
 
-from canonicaljson import encode_canonical_json, json
-
 from ._base import SQLBaseStore
 
-from six import iteritems
-
 
 class EndToEndKeyStore(SQLBaseStore):
     def set_e2e_device_keys(self, user_id, device_id, time_now, device_keys):
diff --git a/synapse/storage/engines/__init__.py b/synapse/storage/engines/__init__.py
index 8c868ece75..e2f9de8451 100644
--- a/synapse/storage/engines/__init__.py
+++ b/synapse/storage/engines/__init__.py
@@ -13,13 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import IncorrectDatabaseSetup
-from .postgres import PostgresEngine
-from .sqlite3 import Sqlite3Engine
-
 import importlib
 import platform
 
+from ._base import IncorrectDatabaseSetup
+from .postgres import PostgresEngine
+from .sqlite3 import Sqlite3Engine
 
 SUPPORTED_MODULE = {
     "sqlite3": Sqlite3Engine,
diff --git a/synapse/storage/engines/sqlite3.py b/synapse/storage/engines/sqlite3.py
index 60f0fa7fb3..19949fc474 100644
--- a/synapse/storage/engines/sqlite3.py
+++ b/synapse/storage/engines/sqlite3.py
@@ -13,11 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage.prepare_database import prepare_database
-
 import struct
 import threading
 
+from synapse.storage.prepare_database import prepare_database
+
 
 class Sqlite3Engine(object):
     single_threaded = True
diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py
index 8fbf7ffba7..8d366d1b91 100644
--- a/synapse/storage/event_federation.py
+++ b/synapse/storage/event_federation.py
@@ -12,23 +12,21 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
 import random
 
+from six.moves import range
+from six.moves.queue import Empty, PriorityQueue
+
+from unpaddedbase64 import encode_base64
+
 from twisted.internet import defer
 
+from synapse.api.errors import StoreError
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.events import EventsWorkerStore
 from synapse.storage.signatures import SignatureWorkerStore
-
-from synapse.api.errors import StoreError
 from synapse.util.caches.descriptors import cached
 
-from unpaddedbase64 import encode_base64
-
-import logging
-from six.moves.queue import PriorityQueue, Empty
-
-from six.moves import range
-
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py
index 05cb3f61ce..29b511ae5e 100644
--- a/synapse/storage/event_push_actions.py
+++ b/synapse/storage/event_push_actions.py
@@ -14,15 +14,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from synapse.storage._base import SQLBaseStore, LoggingTransaction
-from twisted.internet import defer
-from synapse.util.caches.descriptors import cachedInlineCallbacks
-
 import logging
 
+from six import iteritems
+
 from canonicaljson import json
 
-from six import iteritems
+from twisted.internet import defer
+
+from synapse.storage._base import LoggingTransaction, SQLBaseStore
+from synapse.util.caches.descriptors import cachedInlineCallbacks
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index a54abb9edd..2aaab0d02c 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -14,37 +14,33 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from collections import OrderedDict, deque, namedtuple
-from functools import wraps
 import itertools
 import logging
+from collections import OrderedDict, deque, namedtuple
+from functools import wraps
+
+from six import iteritems, itervalues
+from six.moves import range
 
 from canonicaljson import json
+from prometheus_client import Counter
 
 from twisted.internet import defer
 
+import synapse.metrics
+from synapse.api.constants import EventTypes
+from synapse.api.errors import SynapseError
+# these are only included to make the type annotations work
+from synapse.events import EventBase  # noqa: F401
+from synapse.events.snapshot import EventContext  # noqa: F401
 from synapse.storage.events_worker import EventsWorkerStore
+from synapse.types import RoomStreamToken, get_domain_from_id
 from synapse.util.async import ObservableDeferred
+from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
 from synapse.util.frozenutils import frozendict_json_encoder
-from synapse.util.logcontext import (
-    PreserveLoggingContext, make_deferred_yieldable,
-)
+from synapse.util.logcontext import PreserveLoggingContext, make_deferred_yieldable
 from synapse.util.logutils import log_function
 from synapse.util.metrics import Measure
-from synapse.api.constants import EventTypes
-from synapse.api.errors import SynapseError
-from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
-from synapse.types import get_domain_from_id, RoomStreamToken
-import synapse.metrics
-
-# these are only included to make the type annotations work
-from synapse.events import EventBase  # noqa: F401
-from synapse.events.snapshot import EventContext  # noqa: F401
-
-from six.moves import range
-from six import itervalues, iteritems
-
-from prometheus_client import Counter
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py
index 896225aab9..5fe1fd13e5 100644
--- a/synapse/storage/events_worker.py
+++ b/synapse/storage/events_worker.py
@@ -12,29 +12,28 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from ._base import SQLBaseStore
+import logging
+from collections import namedtuple
+
+from canonicaljson import json
 
 from twisted.internet import defer
 
+from synapse.api.errors import SynapseError
+# these are only included to make the type annotations work
+from synapse.events import EventBase  # noqa: F401
 from synapse.events import FrozenEvent
+from synapse.events.snapshot import EventContext  # noqa: F401
 from synapse.events.utils import prune_event
-
 from synapse.util.logcontext import (
-    PreserveLoggingContext, make_deferred_yieldable, run_in_background, LoggingContext,
+    LoggingContext,
+    PreserveLoggingContext,
+    make_deferred_yieldable,
+    run_in_background,
 )
 from synapse.util.metrics import Measure
-from synapse.api.errors import SynapseError
 
-from collections import namedtuple
-
-import logging
-
-from canonicaljson import json
-
-# these are only included to make the type annotations work
-from synapse.events import EventBase  # noqa: F401
-from synapse.events.snapshot import EventContext  # noqa: F401
+from ._base import SQLBaseStore
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/filtering.py b/synapse/storage/filtering.py
index eae6027cee..2d5896c5b4 100644
--- a/synapse/storage/filtering.py
+++ b/synapse/storage/filtering.py
@@ -13,13 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from canonicaljson import encode_canonical_json, json
+
 from twisted.internet import defer
 
-from ._base import SQLBaseStore
-from synapse.api.errors import SynapseError, Codes
+from synapse.api.errors import Codes, SynapseError
 from synapse.util.caches.descriptors import cachedInlineCallbacks
 
-from canonicaljson import encode_canonical_json, json
+from ._base import SQLBaseStore
 
 
 class FilteringStore(SQLBaseStore):
diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py
index b77402d295..592d1b4c2a 100644
--- a/synapse/storage/group_server.py
+++ b/synapse/storage/group_server.py
@@ -14,15 +14,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from canonicaljson import json
+
 from twisted.internet import defer
 
 from synapse.api.errors import SynapseError
 
 from ._base import SQLBaseStore
 
-from canonicaljson import json
-
-
 # The category ID for the "default" category. We don't store as null in the
 # database to avoid the fun of null != null
 _DEFAULT_CATEGORY_ID = ""
diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py
index 0f13b61da8..f547977600 100644
--- a/synapse/storage/keys.py
+++ b/synapse/storage/keys.py
@@ -13,17 +13,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from ._base import SQLBaseStore
-from synapse.util.caches.descriptors import cachedInlineCallbacks
+import hashlib
+import logging
 
-from twisted.internet import defer
 import six
 
-import OpenSSL
 from signedjson.key import decode_verify_key_bytes
 
-import hashlib
-import logging
+import OpenSSL
+from twisted.internet import defer
+
+from synapse.util.caches.descriptors import cachedInlineCallbacks
+
+from ._base import SQLBaseStore
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index cf2aae0468..b290f834b3 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -20,7 +20,6 @@ import logging
 import os
 import re
 
-
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py
index f05d91cc58..a0c7a0dc87 100644
--- a/synapse/storage/presence.py
+++ b/synapse/storage/presence.py
@@ -13,13 +13,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import SQLBaseStore
+from collections import namedtuple
+
+from twisted.internet import defer
+
 from synapse.api.constants import PresenceState
-from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList
 from synapse.util import batch_iter
+from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList
 
-from collections import namedtuple
-from twisted.internet import defer
+from ._base import SQLBaseStore
 
 
 class UserPresenceState(namedtuple("UserPresenceState",
diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py
index 8612bd5ecc..60295da254 100644
--- a/synapse/storage/profile.py
+++ b/synapse/storage/profile.py
@@ -15,8 +15,8 @@
 
 from twisted.internet import defer
 
-from synapse.storage.roommember import ProfileInfo
 from synapse.api.errors import StoreError
+from synapse.storage.roommember import ProfileInfo
 
 from ._base import SQLBaseStore
 
diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py
index 9e52e992b3..be655d287b 100644
--- a/synapse/storage/push_rule.py
+++ b/synapse/storage/push_rule.py
@@ -14,21 +14,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import SQLBaseStore
+import abc
+import logging
+
+from canonicaljson import json
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes
+from synapse.push.baserules import list_with_base_rules
 from synapse.storage.appservice import ApplicationServiceWorkerStore
 from synapse.storage.pusher import PusherWorkerStore
 from synapse.storage.receipts import ReceiptsWorkerStore
 from synapse.storage.roommember import RoomMemberWorkerStore
 from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList
 from synapse.util.caches.stream_change_cache import StreamChangeCache
-from synapse.push.baserules import list_with_base_rules
-from synapse.api.constants import EventTypes
-from twisted.internet import defer
-from canonicaljson import json
-
-import abc
-import logging
+
+from ._base import SQLBaseStore
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py
index c6def861cf..cc273a57b2 100644
--- a/synapse/storage/pusher.py
+++ b/synapse/storage/pusher.py
@@ -14,15 +14,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from ._base import SQLBaseStore
-from twisted.internet import defer
+import logging
+import types
 
 from canonicaljson import encode_canonical_json, json
 
+from twisted.internet import defer
+
 from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList
 
-import logging
-import types
+from ._base import SQLBaseStore
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py
index f230a3bab7..3738901ea4 100644
--- a/synapse/storage/receipts.py
+++ b/synapse/storage/receipts.py
@@ -14,18 +14,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import SQLBaseStore
-from .util.id_generators import StreamIdGenerator
-from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList, cached
-from synapse.util.caches.stream_change_cache import StreamChangeCache
-
-from twisted.internet import defer
+import abc
+import logging
 
 from canonicaljson import json
 
-import abc
-import logging
+from twisted.internet import defer
+
+from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList
+from synapse.util.caches.stream_change_cache import StreamChangeCache
+
+from ._base import SQLBaseStore
+from .util.id_generators import StreamIdGenerator
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index 0d18f6d869..07333f777d 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -15,15 +15,15 @@
 
 import re
 
+from six.moves import range
+
 from twisted.internet import defer
 
-from synapse.api.errors import StoreError, Codes
+from synapse.api.errors import Codes, StoreError
 from synapse.storage import background_updates
 from synapse.storage._base import SQLBaseStore
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
 
-from six.moves import range
-
 
 class RegistrationWorkerStore(SQLBaseStore):
     @cached()
diff --git a/synapse/storage/rejections.py b/synapse/storage/rejections.py
index 40acb5c4ed..880f047adb 100644
--- a/synapse/storage/rejections.py
+++ b/synapse/storage/rejections.py
@@ -13,10 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import SQLBaseStore
-
 import logging
 
+from ._base import SQLBaseStore
+
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/room.py b/synapse/storage/room.py
index ca0eb187e5..3147fb6827 100644
--- a/synapse/storage/room.py
+++ b/synapse/storage/room.py
@@ -13,6 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import collections
+import logging
+import re
+
+from canonicaljson import json
+
 from twisted.internet import defer
 
 from synapse.api.errors import StoreError
@@ -20,12 +26,6 @@ from synapse.storage._base import SQLBaseStore
 from synapse.storage.search import SearchStore
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
 
-from canonicaljson import json
-
-import collections
-import logging
-import re
-
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index 8fc9549a75..02a802bed9 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -14,24 +14,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from twisted.internet import defer
-
+import logging
 from collections import namedtuple
 
+from six import iteritems, itervalues
+
+from canonicaljson import json
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, Membership
 from synapse.storage.events import EventsWorkerStore
+from synapse.types import get_domain_from_id
 from synapse.util.async import Linearizer
 from synapse.util.caches import intern_string
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
 from synapse.util.stringutils import to_ascii
 
-from synapse.api.constants import Membership, EventTypes
-from synapse.types import get_domain_from_id
-
-import logging
-from canonicaljson import json
-
-from six import itervalues, iteritems
-
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/schema/delta/25/fts.py b/synapse/storage/schema/delta/25/fts.py
index e7351c3ae6..4b2ffd35fd 100644
--- a/synapse/storage/schema/delta/25/fts.py
+++ b/synapse/storage/schema/delta/25/fts.py
@@ -14,11 +14,11 @@
 
 import logging
 
-from synapse.storage.prepare_database import get_statements
-from synapse.storage.engines import PostgresEngine, Sqlite3Engine
-
 import simplejson
 
+from synapse.storage.engines import PostgresEngine, Sqlite3Engine
+from synapse.storage.prepare_database import get_statements
+
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/schema/delta/27/ts.py b/synapse/storage/schema/delta/27/ts.py
index 6df57b5206..414f9f5aa0 100644
--- a/synapse/storage/schema/delta/27/ts.py
+++ b/synapse/storage/schema/delta/27/ts.py
@@ -14,10 +14,10 @@
 
 import logging
 
-from synapse.storage.prepare_database import get_statements
-
 import simplejson
 
+from synapse.storage.prepare_database import get_statements
+
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/schema/delta/30/as_users.py b/synapse/storage/schema/delta/30/as_users.py
index 85bd1a2006..ef7ec34346 100644
--- a/synapse/storage/schema/delta/30/as_users.py
+++ b/synapse/storage/schema/delta/30/as_users.py
@@ -12,10 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from synapse.config.appservice import load_appservices
 
 from six.moves import range
 
+from synapse.config.appservice import load_appservices
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/schema/delta/31/search_update.py b/synapse/storage/schema/delta/31/search_update.py
index fe6b7d196d..7d8ca5f93f 100644
--- a/synapse/storage/schema/delta/31/search_update.py
+++ b/synapse/storage/schema/delta/31/search_update.py
@@ -12,12 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage.engines import PostgresEngine
-from synapse.storage.prepare_database import get_statements
-
 import logging
+
 import simplejson
 
+from synapse.storage.engines import PostgresEngine
+from synapse.storage.prepare_database import get_statements
+
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/schema/delta/33/event_fields.py b/synapse/storage/schema/delta/33/event_fields.py
index 1e002f9db2..bff1256a7b 100644
--- a/synapse/storage/schema/delta/33/event_fields.py
+++ b/synapse/storage/schema/delta/33/event_fields.py
@@ -12,11 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from synapse.storage.prepare_database import get_statements - import logging + import simplejson +from synapse.storage.prepare_database import get_statements + logger = logging.getLogger(__name__) diff --git a/synapse/storage/schema/delta/33/remote_media_ts.py b/synapse/storage/schema/delta/33/remote_media_ts.py index 55ae43f395..9754d3ccfb 100644 --- a/synapse/storage/schema/delta/33/remote_media_ts.py +++ b/synapse/storage/schema/delta/33/remote_media_ts.py @@ -14,7 +14,6 @@ import time - ALTER_TABLE = "ALTER TABLE remote_media_cache ADD COLUMN last_access_ts BIGINT" diff --git a/synapse/storage/schema/delta/34/cache_stream.py b/synapse/storage/schema/delta/34/cache_stream.py index 3b63a1562d..cf09e43e2b 100644 --- a/synapse/storage/schema/delta/34/cache_stream.py +++ b/synapse/storage/schema/delta/34/cache_stream.py @@ -12,11 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.storage.prepare_database import get_statements -from synapse.storage.engines import PostgresEngine - import logging +from synapse.storage.engines import PostgresEngine +from synapse.storage.prepare_database import get_statements + logger = logging.getLogger(__name__) diff --git a/synapse/storage/schema/delta/34/received_txn_purge.py b/synapse/storage/schema/delta/34/received_txn_purge.py index 033144341c..67d505e68b 100644 --- a/synapse/storage/schema/delta/34/received_txn_purge.py +++ b/synapse/storage/schema/delta/34/received_txn_purge.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.storage.engines import PostgresEngine - import logging +from synapse.storage.engines import PostgresEngine + logger = logging.getLogger(__name__) diff --git a/synapse/storage/schema/delta/34/sent_txn_purge.py b/synapse/storage/schema/delta/34/sent_txn_purge.py index 81948e3431..0ffab10b6f 100644 --- a/synapse/storage/schema/delta/34/sent_txn_purge.py +++ b/synapse/storage/schema/delta/34/sent_txn_purge.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.storage.engines import PostgresEngine - import logging +from synapse.storage.engines import PostgresEngine + logger = logging.getLogger(__name__) diff --git a/synapse/storage/schema/delta/37/remove_auth_idx.py b/synapse/storage/schema/delta/37/remove_auth_idx.py index 20ad8bd5a6..a377884169 100644 --- a/synapse/storage/schema/delta/37/remove_auth_idx.py +++ b/synapse/storage/schema/delta/37/remove_auth_idx.py @@ -12,11 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.storage.prepare_database import get_statements -from synapse.storage.engines import PostgresEngine - import logging +from synapse.storage.engines import PostgresEngine +from synapse.storage.prepare_database import get_statements + logger = logging.getLogger(__name__) DROP_INDICES = """ diff --git a/synapse/storage/schema/delta/42/user_dir.py b/synapse/storage/schema/delta/42/user_dir.py index ea6a18196d..506f326f4d 100644 --- a/synapse/storage/schema/delta/42/user_dir.py +++ b/synapse/storage/schema/delta/42/user_dir.py @@ -14,8 +14,8 @@ import logging -from synapse.storage.prepare_database import get_statements from synapse.storage.engines import PostgresEngine, Sqlite3Engine +from synapse.storage.prepare_database import get_statements logger = logging.getLogger(__name__) diff --git a/synapse/storage/search.py b/synapse/storage/search.py index 9b77c45318..d5b5df93e6 100644 --- a/synapse/storage/search.py +++ b/synapse/storage/search.py @@ -13,19 +13,21 @@ # See the License for the specific language governing permissions and # limitations under the License. -from collections import namedtuple import logging import re -from canonicaljson import json +from collections import namedtuple from six import string_types +from canonicaljson import json + from twisted.internet import defer -from .background_updates import BackgroundUpdateStore from synapse.api.errors import SynapseError from synapse.storage.engines import PostgresEngine, Sqlite3Engine +from .background_updates import BackgroundUpdateStore + logger = logging.getLogger(__name__) SearchEntry = namedtuple('SearchEntry', [ diff --git a/synapse/storage/signatures.py b/synapse/storage/signatures.py index 25922e5a9c..470212aa2a 100644 --- a/synapse/storage/signatures.py +++ b/synapse/storage/signatures.py @@ -13,15 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer import six -from ._base import SQLBaseStore - from unpaddedbase64 import encode_base64 + +from twisted.internet import defer + from synapse.crypto.event_signing import compute_event_reference_hash from synapse.util.caches.descriptors import cached, cachedList +from ._base import SQLBaseStore + # py2 sqlite has buffer hardcoded as only binary type, so we must use it, # despite being deprecated and removed in favor of memoryview if six.PY2: diff --git a/synapse/storage/state.py b/synapse/storage/state.py index cd9821c270..89a05c4618 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -13,8 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from collections import namedtuple import logging +from collections import namedtuple from six import iteritems, itervalues from six.moves import range @@ -23,10 +23,11 @@ from twisted.internet import defer from synapse.storage.background_updates import BackgroundUpdateStore from synapse.storage.engines import PostgresEngine -from synapse.util.caches import intern_string, get_cache_factor_for +from synapse.util.caches import get_cache_factor_for, intern_string from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.dictionary_cache import DictionaryCache from synapse.util.stringutils import to_ascii + from ._base import SQLBaseStore logger = logging.getLogger(__name__) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index fb463c525a..66856342f0 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -33,22 +33,20 @@ what sort order was used: and stream ordering columns respectively. """ +import abc +import logging +from collections import namedtuple + +from six.moves import range + from twisted.internet import defer from synapse.storage._base import SQLBaseStore +from synapse.storage.engines import PostgresEngine from synapse.storage.events import EventsWorkerStore - from synapse.types import RoomStreamToken from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.logcontext import make_deferred_yieldable, run_in_background -from synapse.storage.engines import PostgresEngine - -import abc -import logging - -from six.moves import range -from collections import namedtuple - logger = logging.getLogger(__name__) diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index 04d123ed95..0f657b2bd3 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ -14,16 +14,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.storage.account_data import AccountDataWorkerStore +import logging -from synapse.util.caches.descriptors import cached -from twisted.internet import defer +from six.moves import range from canonicaljson import json -import logging +from twisted.internet import defer -from six.moves import range +from synapse.storage.account_data import AccountDataWorkerStore +from synapse.util.caches.descriptors import cached logger = logging.getLogger(__name__) diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index acbc03446e..c3bc94f56d 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -13,17 +13,18 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from ._base import SQLBaseStore -from synapse.util.caches.descriptors import cached +import logging +from collections import namedtuple -from twisted.internet import defer import six from canonicaljson import encode_canonical_json, json -from collections import namedtuple +from twisted.internet import defer -import logging +from synapse.util.caches.descriptors import cached + +from ._base import SQLBaseStore # py2 sqlite has buffer hardcoded as only binary type, so we must use it, # despite being deprecated and removed in favor of memoryview diff --git a/synapse/storage/user_directory.py b/synapse/storage/user_directory.py index 275c299998..ce59e70d0e 100644 --- a/synapse/storage/user_directory.py +++ b/synapse/storage/user_directory.py @@ -13,19 +13,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer +import logging +import re -from ._base import SQLBaseStore +from six import iteritems + +from twisted.internet import defer -from synapse.util.caches.descriptors import cached, cachedInlineCallbacks from synapse.api.constants import EventTypes, JoinRules from synapse.storage.engines import PostgresEngine, Sqlite3Engine from synapse.types import get_domain_from_id, get_localpart_from_id +from synapse.util.caches.descriptors import cached, cachedInlineCallbacks -from six import iteritems - -import re -import logging +from ._base import SQLBaseStore logger = logging.getLogger(__name__) diff --git a/synapse/storage/user_erasure_store.py b/synapse/storage/user_erasure_store.py index 47bfc01e84..be013f4427 100644 --- a/synapse/storage/user_erasure_store.py +++ b/synapse/storage/user_erasure_store.py @@ -17,7 +17,7 @@ import operator from twisted.internet import defer from synapse.storage._base import SQLBaseStore -from synapse.util.caches.descriptors import cachedList, cached +from synapse.util.caches.descriptors import cached, cachedList class UserErasureWorkerStore(SQLBaseStore): diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 95031dc9ec..d6160d5e4d 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -13,9 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -from collections import deque import contextlib import threading +from collections import deque class IdGenerator(object): diff --git a/synapse/streams/config.py b/synapse/streams/config.py index ca78e551cb..46ccbbda7d 100644 --- a/synapse/streams/config.py +++ b/synapse/streams/config.py @@ -13,11 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.api.errors import SynapseError -from synapse.types import StreamToken - import logging +from synapse.api.errors import SynapseError +from synapse.types import StreamToken logger = logging.getLogger(__name__) diff --git a/synapse/streams/events.py b/synapse/streams/events.py index f03ad99118..e5220132a3 100644 --- a/synapse/streams/events.py +++ b/synapse/streams/events.py @@ -15,13 +15,12 @@ from twisted.internet import defer -from synapse.types import StreamToken - +from synapse.handlers.account_data import AccountDataEventSource from synapse.handlers.presence import PresenceEventSource +from synapse.handlers.receipts import ReceiptEventSource from synapse.handlers.room import RoomEventSource from synapse.handlers.typing import TypingNotificationEventSource -from synapse.handlers.receipts import ReceiptEventSource -from synapse.handlers.account_data import AccountDataEventSource +from synapse.types import StreamToken class EventSources(object): diff --git a/synapse/types.py b/synapse/types.py index cc7c182a78..08f058f714 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -13,11 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. import string +from collections import namedtuple from synapse.api.errors import SynapseError -from collections import namedtuple - class Requester(namedtuple("Requester", [ "user", "access_token_id", "is_guest", "device_id", "app_service", diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index e9886ef299..680ea928c7 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -17,6 +17,7 @@ import logging from itertools import islice import attr + from twisted.internet import defer, task from synapse.util.logcontext import PreserveLoggingContext diff --git a/synapse/util/async.py b/synapse/util/async.py index 1668df4ce6..5d0fb39130 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -13,20 +13,22 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging +from contextlib import contextmanager + +from six.moves import range + from twisted.internet import defer from twisted.internet.defer import CancelledError from twisted.python import failure +from synapse.util import Clock, logcontext, unwrapFirstError + from .logcontext import ( - PreserveLoggingContext, make_deferred_yieldable, run_in_background + PreserveLoggingContext, + make_deferred_yieldable, + run_in_background, ) -from synapse.util import logcontext, unwrapFirstError, Clock - -from contextlib import contextmanager - -import logging - -from six.moves import range logger = logging.getLogger(__name__) diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 900575eb3c..7b065b195e 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -13,12 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from prometheus_client.core import Gauge, REGISTRY, GaugeMetricFamily - import os -from six.moves import intern import six +from six.moves import intern + +from prometheus_client.core import REGISTRY, Gauge, GaugeMetricFamily CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.5)) diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 65a1042de1..f8a07df6b8 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -13,10 +13,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import functools +import inspect import logging +import threading +from collections import namedtuple +import six +from six import itervalues, string_types + +from twisted.internet import defer + +from synapse.util import logcontext, unwrapFirstError from synapse.util.async import ObservableDeferred -from synapse.util import unwrapFirstError, logcontext from synapse.util.caches import get_cache_factor_for from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry @@ -24,17 +33,6 @@ from synapse.util.stringutils import to_ascii from . import register_cache -from twisted.internet import defer -from collections import namedtuple - -import functools -import inspect -import threading - -from six import string_types, itervalues -import six - - logger = logging.getLogger(__name__) diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py index 95793d466d..6c0b5a4094 100644 --- a/synapse/util/caches/dictionary_cache.py +++ b/synapse/util/caches/dictionary_cache.py @@ -13,12 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.util.caches.lrucache import LruCache -from collections import namedtuple -from . import register_cache -import threading import logging +import threading +from collections import namedtuple + +from synapse.util.caches.lrucache import LruCache +from . import register_cache logger = logging.getLogger(__name__) diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index ff04c91955..4abca91f6d 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -13,11 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.util.caches import register_cache - -from collections import OrderedDict import logging +from collections import OrderedDict +from synapse.util.caches import register_cache logger = logging.getLogger(__name__) diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 1c5a982094..b684f24e7b 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -14,8 +14,8 @@ # limitations under the License. -from functools import wraps import threading +from functools import wraps from synapse.util.caches.treecache import TreeCache diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 0fb8620001..8637867c6d 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -13,12 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.util import caches - +import logging from sortedcontainers import SortedDict -import logging +from synapse.util import caches logger = logging.getLogger(__name__) diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py index c78801015b..629ed44149 100644 --- a/synapse/util/file_consumer.py +++ b/synapse/util/file_consumer.py @@ -13,12 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. +from six.moves import queue + from twisted.internet import threads from synapse.util.logcontext import make_deferred_yieldable, run_in_background -from six.moves import queue - class BackgroundFileConsumer(object): """A consumer that writes to a file like object. Supports both push diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py index 535e7d0e7a..581c6052ac 100644 --- a/synapse/util/frozenutils.py +++ b/synapse/util/frozenutils.py @@ -13,11 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from frozendict import frozendict -from canonicaljson import json - from six import string_types +from canonicaljson import json +from frozendict import frozendict + def freeze(o): if isinstance(o, dict): diff --git a/synapse/util/httpresourcetree.py b/synapse/util/httpresourcetree.py index e9f0f292ee..2d7ddc1cbe 100644 --- a/synapse/util/httpresourcetree.py +++ b/synapse/util/httpresourcetree.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.web.resource import NoResource - import logging +from twisted.web.resource import NoResource + logger = logging.getLogger(__name__) diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index df2b71b791..fe9288b031 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -22,10 +22,10 @@ them. See doc/log_contexts.rst for details on how this works. """ -from twisted.internet import defer - -import threading import logging +import threading + +from twisted.internet import defer logger = logging.getLogger(__name__) diff --git a/synapse/util/logformatter.py b/synapse/util/logformatter.py index 3e42868ea9..a46bc47ce3 100644 --- a/synapse/util/logformatter.py +++ b/synapse/util/logformatter.py @@ -14,10 +14,11 @@ # limitations under the License. -from six import StringIO import logging import traceback +from six import StringIO + class LogFormatter(logging.Formatter): """Log formatter which gives more detail for exceptions diff --git a/synapse/util/logutils.py b/synapse/util/logutils.py index 03249c5dc8..62a00189cc 100644 --- a/synapse/util/logutils.py +++ b/synapse/util/logutils.py @@ -14,13 +14,11 @@ # limitations under the License. -from inspect import getcallargs -from functools import wraps - -import logging import inspect +import logging import time - +from functools import wraps +from inspect import getcallargs _TIME_FUNC_ID = 0 diff --git a/synapse/util/manhole.py b/synapse/util/manhole.py index 97e0f00b67..14be3c7396 100644 --- a/synapse/util/manhole.py +++ b/synapse/util/manhole.py @@ -12,11 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from twisted.conch.manhole import ColoredManhole -from twisted.conch.insults import insults from twisted.conch import manhole_ssh -from twisted.cred import checkers, portal +from twisted.conch.insults import insults +from twisted.conch.manhole import ColoredManhole from twisted.conch.ssh.keys import Key +from twisted.cred import checkers, portal PUBLIC_KEY = ( "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEArzJx8OYOnJmzf4tfBEvLi8DVPrJ3/c9k2I/Az" diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 1ba7d65c7c..63bc64c642 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -13,14 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer +import logging +from functools import wraps from prometheus_client import Counter -from synapse.util.logcontext import LoggingContext -from functools import wraps -import logging +from twisted.internet import defer +from synapse.util.logcontext import LoggingContext logger = logging.getLogger(__name__) diff --git a/synapse/util/msisdn.py b/synapse/util/msisdn.py index 607161e7f0..a6c30e5265 100644 --- a/synapse/util/msisdn.py +++ b/synapse/util/msisdn.py @@ -14,6 +14,7 @@ # limitations under the License. import phonenumbers + from synapse.api.errors import SynapseError diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index c5a45cef7c..5ac33b2132 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -13,20 +13,19 @@ # See the License for the specific language governing permissions and # limitations under the License. +import collections +import contextlib +import logging + from twisted.internet import defer from synapse.api.errors import LimitExceededError - from synapse.util.logcontext import ( - run_in_background, make_deferred_yieldable, PreserveLoggingContext, + make_deferred_yieldable, + run_in_background, ) -import collections -import contextlib -import logging - - logger = logging.getLogger(__name__) diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 4e93f69d3a..8a3a06fd74 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -12,14 +12,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import synapse.util.logcontext -from twisted.internet import defer - -from synapse.api.errors import CodeMessageException - import logging import random +from twisted.internet import defer + +import synapse.util.logcontext +from synapse.api.errors import CodeMessageException logger = logging.getLogger(__name__) diff --git a/synapse/util/rlimit.py b/synapse/util/rlimit.py index f4a9abf83f..6c0f2bb0cf 100644 --- a/synapse/util/rlimit.py +++ b/synapse/util/rlimit.py @@ -13,9 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import resource import logging - +import resource logger = logging.getLogger("synapse.app.homeserver") diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py index b98b9dc6e4..43d9db67ec 100644 --- a/synapse/util/stringutils.py +++ b/synapse/util/stringutils.py @@ -15,6 +15,7 @@ import random import string + from six.moves import range _string_with_symbols = ( diff --git a/synapse/util/versionstring.py b/synapse/util/versionstring.py index 52086df465..1fbcd41115 100644 --- a/synapse/util/versionstring.py +++ b/synapse/util/versionstring.py @@ -14,9 +14,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import subprocess -import os import logging +import os +import subprocess logger = logging.getLogger(__name__) diff --git a/synapse/visibility.py b/synapse/visibility.py index 65d79cf0d0..015c2bab37 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -20,9 +20,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.events.utils import prune_event -from synapse.util.logcontext import ( - make_deferred_yieldable, preserve_fn, -) +from synapse.util.logcontext import make_deferred_yieldable, preserve_fn logger = logging.getLogger(__name__) diff --git a/tests/__init__.py b/tests/__init__.py index aab20e8e02..24006c949e 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -14,4 +14,5 @@ # limitations under the License. from twisted.trial import util + util.DEFAULT_TIMEOUT_DURATION = 10 diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index aec3b62897..5f158ec4b9 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -13,16 +13,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pymacaroons from mock import Mock + +import pymacaroons + from twisted.internet import defer import synapse.handlers.auth from synapse.api.auth import Auth from synapse.api.errors import AuthError from synapse.types import UserID + from tests import unittest -from tests.utils import setup_test_homeserver, mock_getRawHeaders +from tests.utils import mock_getRawHeaders, setup_test_homeserver class TestHandlers(object): diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index dcceca7f3e..836a23fb54 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -13,19 +13,18 @@ # See the License for the specific language governing permissions and # limitations under the License. -from tests import unittest -from twisted.internet import defer - from mock import Mock -from tests.utils import ( - MockHttpResource, DeferredMockCallable, setup_test_homeserver -) +import jsonschema + +from twisted.internet import defer + +from synapse.api.errors import SynapseError from synapse.api.filtering import Filter from synapse.events import FrozenEvent -from synapse.api.errors import SynapseError -import jsonschema +from tests import unittest +from tests.utils import DeferredMockCallable, MockHttpResource, setup_test_homeserver user_localpart = "test_user" diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py index 5b2b95860a..891e0cc973 100644 --- a/tests/appservice/test_appservice.py +++ b/tests/appservice/test_appservice.py @@ -12,14 +12,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.appservice import ApplicationService +import re + +from mock import Mock from twisted.internet import defer -from mock import Mock -from tests import unittest +from synapse.appservice import ApplicationService -import re +from tests import unittest def _regex(regex, exclusive=True): diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index 9181692771..b9f4863e9a 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -12,17 +12,22 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from mock import Mock + +from twisted.internet import defer + from synapse.appservice import ApplicationServiceState from synapse.appservice.scheduler import ( - _ServiceQueuer, _TransactionController, _Recoverer + _Recoverer, + _ServiceQueuer, + _TransactionController, ) -from twisted.internet import defer - from synapse.util.logcontext import make_deferred_yieldable -from ..utils import MockClock -from mock import Mock + from tests import unittest +from ..utils import MockClock + class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase): diff --git a/tests/config/test_generate.py b/tests/config/test_generate.py index 879159ccea..eb7f0ab12a 100644 --- a/tests/config/test_generate.py +++ b/tests/config/test_generate.py @@ -19,6 +19,7 @@ import shutil import tempfile from synapse.config.homeserver import HomeServerConfig + from tests import unittest diff --git a/tests/config/test_load.py b/tests/config/test_load.py index 772afd2cf9..5c422eff38 100644 --- a/tests/config/test_load.py +++ b/tests/config/test_load.py @@ -15,8 +15,11 @@ import os.path import shutil import tempfile + import yaml + from synapse.config.homeserver import HomeServerConfig + from tests import unittest diff --git a/tests/crypto/test_event_signing.py b/tests/crypto/test_event_signing.py index 47cb328a01..cd11871b80 100644 --- a/tests/crypto/test_event_signing.py +++ b/tests/crypto/test_event_signing.py @@ -14,15 +14,13 @@ # limitations under the License. -from tests import unittest - -from synapse.events.builder import EventBuilder -from synapse.crypto.event_signing import add_hashes_and_signatures - +import nacl.signing from unpaddedbase64 import decode_base64 -import nacl.signing +from synapse.crypto.event_signing import add_hashes_and_signatures +from synapse.events.builder import EventBuilder +from tests import unittest # Perform these tests using given secret key so we get entirely deterministic # signatures output that we can test against. diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index cc1c862ba4..a9d37fe084 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -14,15 +14,19 @@ # limitations under the License. 
import time +from mock import Mock + import signedjson.key import signedjson.sign -from mock import Mock + +from twisted.internet import defer, reactor + from synapse.api.errors import SynapseError from synapse.crypto import keyring -from synapse.util import logcontext, Clock +from synapse.util import Clock, logcontext from synapse.util.logcontext import LoggingContext + from tests import unittest, utils -from twisted.internet import defer, reactor class MockPerspectiveServer(object): diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index dfc870066e..f51d99419e 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -14,11 +14,11 @@ # limitations under the License. -from .. import unittest - from synapse.events import FrozenEvent from synapse.events.utils import prune_event, serialize_event +from .. import unittest + def MockEvent(**kwargs): if "event_id" not in kwargs: diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index 4e8dc8fea0..c91e25f54f 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -16,6 +16,7 @@ import logging from synapse.events import FrozenEvent from synapse.federation.federation_server import server_matches_acl_event + from tests import unittest diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index b753455943..57c0771cf3 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -13,13 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. +from mock import Mock + from twisted.internet import defer -from .. import unittest -from tests.utils import MockClock from synapse.handlers.appservice import ApplicationServicesHandler -from mock import Mock +from tests.utils import MockClock + +from .. import unittest class AppServiceHandlerTestCase(unittest.TestCase): diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py index 1822dcf1e0..2e5e8e4dec 100644 --- a/tests/handlers/test_auth.py +++ b/tests/handlers/test_auth.py @@ -14,11 +14,13 @@ # limitations under the License. import pymacaroons + from twisted.internet import defer import synapse import synapse.api.errors from synapse.handlers.auth import AuthHandler + from tests import unittest from tests.utils import setup_test_homeserver diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 778ff2f6e9..633a0b7f36 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -17,8 +17,8 @@ from twisted.internet import defer import synapse.api.errors import synapse.handlers.device - import synapse.storage + from tests import unittest, utils user1 = "@boris:aaa" diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index 7e5332e272..a353070316 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -14,14 +14,14 @@ # limitations under the License. 
-from tests import unittest -from twisted.internet import defer - from mock import Mock +from twisted.internet import defer + from synapse.handlers.directory import DirectoryHandler from synapse.types import RoomAlias +from tests import unittest from tests.utils import setup_test_homeserver diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index d1bd87b898..ca1542236d 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -14,13 +14,14 @@ # limitations under the License. import mock -from synapse.api import errors + from twisted.internet import defer import synapse.api.errors import synapse.handlers.e2e_keys - import synapse.storage +from synapse.api import errors + from tests import unittest, utils diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index de06a6ad30..121ce78634 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -14,18 +14,22 @@ # limitations under the License. -from tests import unittest - from mock import Mock, call from synapse.api.constants import PresenceState from synapse.handlers.presence import ( - handle_update, handle_timeout, - IDLE_TIMER, SYNC_ONLINE_TIMEOUT, LAST_ACTIVE_GRANULARITY, FEDERATION_TIMEOUT, FEDERATION_PING_INTERVAL, + FEDERATION_TIMEOUT, + IDLE_TIMER, + LAST_ACTIVE_GRANULARITY, + SYNC_ONLINE_TIMEOUT, + handle_timeout, + handle_update, ) from synapse.storage.presence import UserPresenceState +from tests import unittest + class PresenceUpdateTestCase(unittest.TestCase): def test_offline_to_online(self): diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index 458296ee4c..dc17918a3d 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -14,16 +14,16 @@ # limitations under the License. -from tests import unittest -from twisted.internet import defer - from mock import Mock, NonCallableMock +from twisted.internet import defer + import synapse.types from synapse.api.errors import AuthError from synapse.handlers.profile import ProfileHandler from synapse.types import UserID +from tests import unittest from tests.utils import setup_test_homeserver diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index e990e45220..025fa1be81 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -13,15 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +from mock import Mock + from twisted.internet import defer -from .. import unittest from synapse.handlers.register import RegistrationHandler from synapse.types import UserID, create_requester from tests.utils import setup_test_homeserver -from mock import Mock +from .. import unittest class RegistrationHandlers(object): diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index a433bbfa8a..b08856f763 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -14,19 +14,24 @@ # limitations under the License. 
-from tests import unittest -from twisted.internet import defer - -from mock import Mock, call, ANY import json -from ..utils import ( - MockHttpResource, MockClock, DeferredMockCallable, setup_test_homeserver -) +from mock import ANY, Mock, call + +from twisted.internet import defer from synapse.api.errors import AuthError from synapse.types import UserID +from tests import unittest + +from ..utils import ( + DeferredMockCallable, + MockClock, + MockHttpResource, + setup_test_homeserver, +) + def _expect_edu(destination, edu_type, content, origin="test"): return { diff --git a/tests/http/test_endpoint.py b/tests/http/test_endpoint.py index b8a48d20a4..60e6a75953 100644 --- a/tests/http/test_endpoint.py +++ b/tests/http/test_endpoint.py @@ -12,10 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from synapse.http.endpoint import ( - parse_server_name, - parse_and_validate_server_name, -) +from synapse.http.endpoint import parse_and_validate_server_name, parse_server_name + from tests import unittest diff --git a/tests/replication/slave/storage/_base.py b/tests/replication/slave/storage/_base.py index 64e07a8c93..8708c8a196 100644 --- a/tests/replication/slave/storage/_base.py +++ b/tests/replication/slave/storage/_base.py @@ -12,17 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer, reactor -from tests import unittest - import tempfile from mock import Mock, NonCallableMock -from tests.utils import setup_test_homeserver -from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory + +from twisted.internet import defer, reactor + from synapse.replication.tcp.client import ( - ReplicationClientHandler, ReplicationClientFactory, + ReplicationClientFactory, + ReplicationClientHandler, ) +from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory + +from tests import unittest +from tests.utils import setup_test_homeserver class BaseSlavedStoreTestCase(unittest.TestCase): diff --git a/tests/replication/slave/storage/test_account_data.py b/tests/replication/slave/storage/test_account_data.py index f47a42e45d..adf226404e 100644 --- a/tests/replication/slave/storage/test_account_data.py +++ b/tests/replication/slave/storage/test_account_data.py @@ -13,11 +13,11 @@ # limitations under the License. -from ._base import BaseSlavedStoreTestCase +from twisted.internet import defer from synapse.replication.slave.storage.account_data import SlavedAccountDataStore -from twisted.internet import defer +from ._base import BaseSlavedStoreTestCase USER_ID = "@feeling:blue" TYPE = "my.type" diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index cb058d3142..cea01d93eb 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -12,15 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from ._base import BaseSlavedStoreTestCase +from twisted.internet import defer from synapse.events import FrozenEvent, _EventInternalMetadata from synapse.events.snapshot import EventContext from synapse.replication.slave.storage.events import SlavedEventStore from synapse.storage.roommember import RoomsForUser -from twisted.internet import defer - +from ._base import BaseSlavedStoreTestCase USER_ID = "@feeling:blue" USER_ID_2 = "@bright:blue" diff --git a/tests/replication/slave/storage/test_receipts.py b/tests/replication/slave/storage/test_receipts.py index 6624fe4eea..e6d670cc1f 100644 --- a/tests/replication/slave/storage/test_receipts.py +++ b/tests/replication/slave/storage/test_receipts.py @@ -12,11 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ._base import BaseSlavedStoreTestCase +from twisted.internet import defer from synapse.replication.slave.storage.receipts import SlavedReceiptsStore -from twisted.internet import defer +from ._base import BaseSlavedStoreTestCase USER_ID = "@feeling:blue" ROOM_ID = "!room:blue" diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index 6a757289db..eee99ca2e0 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -1,10 +1,11 @@ -from synapse.rest.client.transactions import HttpTransactionCache -from synapse.rest.client.transactions import CLEANUP_PERIOD_MS -from twisted.internet import defer, reactor from mock import Mock, call +from twisted.internet import defer, reactor + +from synapse.rest.client.transactions import CLEANUP_PERIOD_MS, HttpTransactionCache from synapse.util import Clock from synapse.util.logcontext import LoggingContext + from tests import unittest from tests.utils import MockClock diff --git a/tests/rest/client/v1/test_events.py b/tests/rest/client/v1/test_events.py index f5a7258e68..a5af36a99c 100644 --- a/tests/rest/client/v1/test_events.py +++ b/tests/rest/client/v1/test_events.py @@ -14,7 +14,7 @@ # limitations under the License. 
""" Tests REST events for /events paths.""" -from tests import unittest +from mock import Mock, NonCallableMock # twisted imports from twisted.internet import defer @@ -23,13 +23,11 @@ import synapse.rest.client.v1.events import synapse.rest.client.v1.register import synapse.rest.client.v1.room +from tests import unittest from ....utils import MockHttpResource, setup_test_homeserver from .utils import RestTestCase -from mock import Mock, NonCallableMock - - PATH_PREFIX = "/_matrix/client/api/v1" diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index dc94b8bd19..d71cc8e0db 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -15,12 +15,15 @@ """Tests REST events for /profile paths.""" from mock import Mock + from twisted.internet import defer import synapse.types -from synapse.api.errors import SynapseError, AuthError +from synapse.api.errors import AuthError, SynapseError from synapse.rest.client.v1 import profile + from tests import unittest + from ....utils import MockHttpResource, setup_test_homeserver myid = "@1234ABCD:test" diff --git a/tests/rest/client/v1/test_register.py b/tests/rest/client/v1/test_register.py index a6a4e2ffe0..f596acb85f 100644 --- a/tests/rest/client/v1/test_register.py +++ b/tests/rest/client/v1/test_register.py @@ -13,12 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.rest.client.v1.register import CreateUserRestServlet -from twisted.internet import defer +import json + from mock import Mock + +from twisted.internet import defer + +from synapse.rest.client.v1.register import CreateUserRestServlet + from tests import unittest from tests.utils import mock_getRawHeaders -import json class CreateUserServletTestCase(unittest.TestCase): diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 61d737725b..895dffa095 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -15,22 +15,21 @@ """Tests REST events for /rooms paths.""" +import json + +from mock import Mock, NonCallableMock +from six.moves.urllib import parse as urlparse + # twisted imports from twisted.internet import defer import synapse.rest.client.v1.room from synapse.api.constants import Membership - from synapse.types import UserID -import json -from six.moves.urllib import parse as urlparse - from ....utils import MockHttpResource, setup_test_homeserver from .utils import RestTestCase -from mock import Mock, NonCallableMock - PATH_PREFIX = "/_matrix/client/api/v1" diff --git a/tests/rest/client/v1/test_typing.py b/tests/rest/client/v1/test_typing.py index fe161ee5cb..bddb3302e4 100644 --- a/tests/rest/client/v1/test_typing.py +++ b/tests/rest/client/v1/test_typing.py @@ -15,18 +15,17 @@ """Tests REST events for /rooms paths.""" +from mock import Mock, NonCallableMock + # twisted imports from twisted.internet import defer import synapse.rest.client.v1.room from synapse.types import UserID -from ....utils import MockHttpResource, MockClock, setup_test_homeserver +from ....utils import MockClock, MockHttpResource, setup_test_homeserver from .utils import RestTestCase -from mock import Mock, NonCallableMock - - PATH_PREFIX = "/_matrix/client/api/v1" diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py index 3bb1dd003a..54d7ba380d 100644 --- a/tests/rest/client/v1/utils.py +++ b/tests/rest/client/v1/utils.py @@ -13,16 +13,16 @@ # See the License for the 
specific language governing permissions and # limitations under the License. +import json +import time + # twisted imports from twisted.internet import defer -# trial imports -from tests import unittest - from synapse.api.constants import Membership -import json -import time +# trial imports +from tests import unittest class RestTestCase(unittest.TestCase): diff --git a/tests/rest/client/v2_alpha/__init__.py b/tests/rest/client/v2_alpha/__init__.py index 5170217d9e..f18a8a6027 100644 --- a/tests/rest/client/v2_alpha/__init__.py +++ b/tests/rest/client/v2_alpha/__init__.py @@ -13,16 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -from tests import unittest - from mock import Mock -from ....utils import MockHttpResource, setup_test_homeserver +from twisted.internet import defer from synapse.types import UserID -from twisted.internet import defer +from tests import unittest +from ....utils import MockHttpResource, setup_test_homeserver PATH_PREFIX = "/_matrix/client/v2_alpha" diff --git a/tests/rest/client/v2_alpha/test_filter.py b/tests/rest/client/v2_alpha/test_filter.py index 76b833e119..bb0b2f94ea 100644 --- a/tests/rest/client/v2_alpha/test_filter.py +++ b/tests/rest/client/v2_alpha/test_filter.py @@ -15,16 +15,13 @@ from twisted.internet import defer -from tests import unittest - -from synapse.rest.client.v2_alpha import filter - -from synapse.api.errors import Codes - import synapse.types - +from synapse.api.errors import Codes +from synapse.rest.client.v2_alpha import filter from synapse.types import UserID +from tests import unittest + from ....utils import MockHttpResource, setup_test_homeserver PATH_PREFIX = "/_matrix/client/v2_alpha" diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 8aba456510..9b57a56070 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -1,12 +1,15 @@ +import json + +from mock import Mock + +from twisted.internet import defer from twisted.python import failure +from synapse.api.errors import InteractiveAuthIncompleteError, SynapseError from synapse.rest.client.v2_alpha.register import RegisterRestServlet -from synapse.api.errors import SynapseError, InteractiveAuthIncompleteError -from twisted.internet import defer -from mock import Mock + from tests import unittest from tests.utils import mock_getRawHeaders -import json class RegisterRestServletTestCase(unittest.TestCase): diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py index c5e2f5549a..bf254a260d 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py @@ -14,21 +14,21 @@ # limitations under the License. 
+import os +import shutil +import tempfile + +from mock import Mock + from twisted.internet import defer, reactor from synapse.rest.media.v1._base import FileInfo -from synapse.rest.media.v1.media_storage import MediaStorage from synapse.rest.media.v1.filepath import MediaFilePaths +from synapse.rest.media.v1.media_storage import MediaStorage from synapse.rest.media.v1.storage_provider import FileStorageProviderBackend -from mock import Mock - from tests import unittest -import os -import shutil -import tempfile - class MediaStorageTests(unittest.TestCase): def setUp(self): diff --git a/tests/server.py b/tests/server.py index 73069dff52..46223ccf05 100644 --- a/tests/server.py +++ b/tests/server.py @@ -1,15 +1,17 @@ +import json from io import BytesIO -import attr -import json from six import text_type -from twisted.python.failure import Failure +import attr + +from twisted.internet import threads from twisted.internet.defer import Deferred +from twisted.python.failure import Failure from twisted.test.proto_helpers import MemoryReactorClock from synapse.http.site import SynapseRequest -from twisted.internet import threads + from tests.utils import setup_test_homeserver as _sth diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index 3cfa21c9f8..6d6f00c5c5 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -14,15 +14,15 @@ # limitations under the License. -from tests import unittest -from twisted.internet import defer - from mock import Mock -from synapse.util.async import ObservableDeferred +from twisted.internet import defer +from synapse.util.async import ObservableDeferred from synapse.util.caches.descriptors import Cache, cached +from tests import unittest + class CacheTestCase(unittest.TestCase): diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index 00825498b1..099861b27c 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -12,21 +12,25 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import json +import os import tempfile -from synapse.config._base import ConfigError -from tests import unittest + +from mock import Mock + +import yaml + from twisted.internet import defer -from tests.utils import setup_test_homeserver from synapse.appservice import ApplicationService, ApplicationServiceState +from synapse.config._base import ConfigError from synapse.storage.appservice import ( - ApplicationServiceStore, ApplicationServiceTransactionStore + ApplicationServiceStore, + ApplicationServiceTransactionStore, ) -import json -import os -import yaml -from mock import Mock +from tests import unittest +from tests.utils import setup_test_homeserver class ApplicationServiceStoreTestCase(unittest.TestCase): diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index 1286b4ce2d..ab1f310572 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -1,10 +1,10 @@ -from tests import unittest +from mock import Mock + from twisted.internet import defer +from tests import unittest from tests.utils import setup_test_homeserver -from mock import Mock - class BackgroundUpdateTestCase(unittest.TestCase): diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index 0ac910e76f..1d1234ee39 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -14,18 +14,18 @@ # limitations under the License. -from tests import unittest -from twisted.internet import defer +from collections import OrderedDict from mock import Mock -from collections import OrderedDict +from twisted.internet import defer from synapse.server import HomeServer - from synapse.storage._base import SQLBaseStore from synapse.storage.engines import create_engine +from tests import unittest + class SQLBaseStoreTestCase(unittest.TestCase): """ Test the "simple" SQL generating methods in SQLBaseStore. """ diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py index f8725acea0..a54cc6bc32 100644 --- a/tests/storage/test_devices.py +++ b/tests/storage/test_devices.py @@ -16,6 +16,7 @@ from twisted.internet import defer import synapse.api.errors + import tests.unittest import tests.utils diff --git a/tests/storage/test_directory.py b/tests/storage/test_directory.py index 95709cd50a..129ebaf343 100644 --- a/tests/storage/test_directory.py +++ b/tests/storage/test_directory.py @@ -14,12 +14,12 @@ # limitations under the License. -from tests import unittest from twisted.internet import defer from synapse.storage.directory import DirectoryStore -from synapse.types import RoomID, RoomAlias +from synapse.types import RoomAlias, RoomID +from tests import unittest from tests.utils import setup_test_homeserver diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py index 3cbf9a78b1..8430fc7ba6 100644 --- a/tests/storage/test_event_push_actions.py +++ b/tests/storage/test_event_push_actions.py @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. +from mock import Mock + from twisted.internet import defer import tests.unittest import tests.utils -from mock import Mock USER_ID = "@user:example.com" diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py index 0be790d8f8..3a3d002782 100644 --- a/tests/storage/test_keys.py +++ b/tests/storage/test_keys.py @@ -14,6 +14,7 @@ # limitations under the License. 
import signedjson.key + from twisted.internet import defer import tests.unittest diff --git a/tests/storage/test_presence.py b/tests/storage/test_presence.py index f5fcb611d4..3276b39504 100644 --- a/tests/storage/test_presence.py +++ b/tests/storage/test_presence.py @@ -14,13 +14,13 @@ # limitations under the License. -from tests import unittest from twisted.internet import defer from synapse.storage.presence import PresenceStore from synapse.types import UserID -from tests.utils import setup_test_homeserver, MockClock +from tests import unittest +from tests.utils import MockClock, setup_test_homeserver class PresenceStoreTestCase(unittest.TestCase): diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py index 423710c9c1..2c95e5e95a 100644 --- a/tests/storage/test_profile.py +++ b/tests/storage/test_profile.py @@ -14,12 +14,12 @@ # limitations under the License. -from tests import unittest from twisted.internet import defer from synapse.storage.profile import ProfileStore from synapse.types import UserID +from tests import unittest from tests.utils import setup_test_homeserver diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index 888ddfaddd..475ec900c4 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -14,16 +14,16 @@ # limitations under the License. -from tests import unittest +from mock import Mock + from twisted.internet import defer from synapse.api.constants import EventTypes, Membership -from synapse.types import UserID, RoomID +from synapse.types import RoomID, UserID +from tests import unittest from tests.utils import setup_test_homeserver -from mock import Mock - class RedactionTestCase(unittest.TestCase): diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py index f863b75846..7821ea3fa3 100644 --- a/tests/storage/test_registration.py +++ b/tests/storage/test_registration.py @@ -14,9 +14,9 @@ # limitations under the License. -from tests import unittest from twisted.internet import defer +from tests import unittest from tests.utils import setup_test_homeserver diff --git a/tests/storage/test_room.py b/tests/storage/test_room.py index ef8a4d234f..ae8ae94b6d 100644 --- a/tests/storage/test_room.py +++ b/tests/storage/test_room.py @@ -14,12 +14,12 @@ # limitations under the License. -from tests import unittest from twisted.internet import defer from synapse.api.constants import EventTypes -from synapse.types import UserID, RoomID, RoomAlias +from synapse.types import RoomAlias, RoomID, UserID +from tests import unittest from tests.utils import setup_test_homeserver diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 657b279e5d..c5fd54f67e 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -14,16 +14,16 @@ # limitations under the License. 
-from tests import unittest +from mock import Mock + from twisted.internet import defer from synapse.api.constants import EventTypes, Membership -from synapse.types import UserID, RoomID +from synapse.types import RoomID, UserID +from tests import unittest from tests.utils import setup_test_homeserver -from mock import Mock - class RoomMemberStoreTestCase(unittest.TestCase): diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index 0891308f25..23fad12bca 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -17,6 +17,7 @@ from twisted.internet import defer from synapse.storage import UserDirectoryStore from synapse.storage.roommember import ProfileInfo + from tests import unittest from tests.utils import setup_test_homeserver diff --git a/tests/test_distributor.py b/tests/test_distributor.py index c066381698..04a88056f1 100644 --- a/tests/test_distributor.py +++ b/tests/test_distributor.py @@ -13,13 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from . import unittest -from twisted.internet import defer - from mock import Mock, patch +from twisted.internet import defer + from synapse.util.distributor import Distributor +from . import unittest + class DistributorTestCase(unittest.TestCase): diff --git a/tests/test_dns.py b/tests/test_dns.py index 3b360a0fc7..b647d92697 100644 --- a/tests/test_dns.py +++ b/tests/test_dns.py @@ -13,16 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -from . import unittest +from mock import Mock + from twisted.internet import defer from twisted.names import dns, error -from mock import Mock - from synapse.http.endpoint import resolve_service from tests.utils import MockClock +from . import unittest + @unittest.DEBUG class DnsTestCase(unittest.TestCase): diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index d08e19c53a..06112430e5 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -13,10 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +import unittest + from synapse import event_auth from synapse.api.errors import AuthError from synapse.events import FrozenEvent -import unittest class EventAuthTestCase(unittest.TestCase): diff --git a/tests/test_federation.py b/tests/test_federation.py index fc80a69369..159a136971 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -1,14 +1,14 @@ -from twisted.internet.defer import succeed, maybeDeferred +from mock import Mock + +from twisted.internet.defer import maybeDeferred, succeed -from synapse.util import Clock from synapse.events import FrozenEvent from synapse.types import Requester, UserID +from synapse.util import Clock from tests import unittest -from tests.server import setup_test_homeserver, ThreadedMemoryReactorClock - -from mock import Mock +from tests.server import ThreadedMemoryReactorClock, setup_test_homeserver class MessageAcceptTests(unittest.TestCase): diff --git a/tests/test_preview.py b/tests/test_preview.py index 5bd36c74aa..446843367e 100644 --- a/tests/test_preview.py +++ b/tests/test_preview.py @@ -13,12 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -from . 
import unittest - from synapse.rest.media.v1.preview_url_resource import ( - summarize_paragraphs, decode_and_calc_og + decode_and_calc_og, + summarize_paragraphs, ) +from . import unittest + class PreviewTestCase(unittest.TestCase): diff --git a/tests/test_server.py b/tests/test_server.py index 8ad822c43b..4192013f6d 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -4,9 +4,10 @@ import re from twisted.internet.defer import Deferred from twisted.test.proto_helpers import MemoryReactorClock -from synapse.util import Clock from synapse.api.errors import Codes, SynapseError from synapse.http.server import JsonResource +from synapse.util import Clock + from tests import unittest from tests.server import make_request, setup_test_homeserver diff --git a/tests/test_state.py b/tests/test_state.py index 71c412faf4..c0f2d1152d 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -13,18 +13,18 @@ # See the License for the specific language governing permissions and # limitations under the License. -from tests import unittest +from mock import Mock + from twisted.internet import defer -from synapse.events import FrozenEvent from synapse.api.auth import Auth from synapse.api.constants import EventTypes, Membership +from synapse.events import FrozenEvent from synapse.state import StateHandler, StateResolutionHandler -from .utils import MockClock - -from mock import Mock +from tests import unittest +from .utils import MockClock _next_event_id = 1000 diff --git a/tests/test_test_utils.py b/tests/test_test_utils.py index d28bb726bb..bc97c12245 100644 --- a/tests/test_test_utils.py +++ b/tests/test_test_utils.py @@ -14,7 +14,6 @@ # limitations under the License. from tests import unittest - from tests.utils import MockClock diff --git a/tests/test_types.py b/tests/test_types.py index 115def2287..729bd676c1 100644 --- a/tests/test_types.py +++ b/tests/test_types.py @@ -13,11 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from tests import unittest - from synapse.api.errors import SynapseError from synapse.server import HomeServer -from synapse.types import UserID, RoomAlias, GroupID +from synapse.types import GroupID, RoomAlias, UserID + +from tests import unittest mock_homeserver = HomeServer(hostname="my.domain") diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index a94d566c96..8176a7dabd 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -13,14 +13,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from functools import partial import logging +from functools import partial import mock + +from twisted.internet import defer, reactor + from synapse.api.errors import SynapseError from synapse.util import logcontext -from twisted.internet import defer, reactor from synapse.util.caches import descriptors + from tests import unittest logger = logging.getLogger(__name__) diff --git a/tests/util/test_dict_cache.py b/tests/util/test_dict_cache.py index 543ac5bed9..26f2fa5800 100644 --- a/tests/util/test_dict_cache.py +++ b/tests/util/test_dict_cache.py @@ -14,10 +14,10 @@ # limitations under the License. 
-from tests import unittest - from synapse.util.caches.dictionary_cache import DictionaryCache +from tests import unittest + class DictCacheTestCase(unittest.TestCase): diff --git a/tests/util/test_expiring_cache.py b/tests/util/test_expiring_cache.py index 31d24adb8b..d12b5e838b 100644 --- a/tests/util/test_expiring_cache.py +++ b/tests/util/test_expiring_cache.py @@ -14,12 +14,12 @@ # limitations under the License. -from .. import unittest - from synapse.util.caches.expiringcache import ExpiringCache from tests.utils import MockClock +from .. import unittest + class ExpiringCacheTestCase(unittest.TestCase): diff --git a/tests/util/test_file_consumer.py b/tests/util/test_file_consumer.py index c2aae8f54c..7ce5f8c258 100644 --- a/tests/util/test_file_consumer.py +++ b/tests/util/test_file_consumer.py @@ -14,15 +14,16 @@ # limitations under the License. -from twisted.internet import defer, reactor +import threading + from mock import NonCallableMock +from six import StringIO + +from twisted.internet import defer, reactor from synapse.util.file_consumer import BackgroundFileConsumer from tests import unittest -from six import StringIO - -import threading class FileConsumerTests(unittest.TestCase): diff --git a/tests/util/test_limiter.py b/tests/util/test_limiter.py index 9c795d9fdb..a5a767b1ff 100644 --- a/tests/util/test_limiter.py +++ b/tests/util/test_limiter.py @@ -14,12 +14,12 @@ # limitations under the License. -from tests import unittest - from twisted.internet import defer from synapse.util.async import Limiter +from tests import unittest + class LimiterTestCase(unittest.TestCase): diff --git a/tests/util/test_linearizer.py b/tests/util/test_linearizer.py index bf7e3aa885..c95907b32c 100644 --- a/tests/util/test_linearizer.py +++ b/tests/util/test_linearizer.py @@ -13,13 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.util import logcontext, Clock -from tests import unittest +from six.moves import range from twisted.internet import defer, reactor +from synapse.util import Clock, logcontext from synapse.util.async import Linearizer -from six.moves import range + +from tests import unittest class LinearizerTestCase(unittest.TestCase): diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py index 9cf90fcfc4..c54001f7a4 100644 --- a/tests/util/test_logcontext.py +++ b/tests/util/test_logcontext.py @@ -1,11 +1,11 @@ import twisted.python.failure -from twisted.internet import defer -from twisted.internet import reactor -from .. import unittest +from twisted.internet import defer, reactor -from synapse.util import logcontext, Clock +from synapse.util import Clock, logcontext from synapse.util.logcontext import LoggingContext +from .. import unittest + class LoggingContextTestCase(unittest.TestCase): diff --git a/tests/util/test_logformatter.py b/tests/util/test_logformatter.py index 1a1a8412f2..297aebbfbe 100644 --- a/tests/util/test_logformatter.py +++ b/tests/util/test_logformatter.py @@ -15,6 +15,7 @@ import sys from synapse.util.logformatter import LogFormatter + from tests import unittest diff --git a/tests/util/test_lrucache.py b/tests/util/test_lrucache.py index dfb78cb8bd..9b36ef4482 100644 --- a/tests/util/test_lrucache.py +++ b/tests/util/test_lrucache.py @@ -14,12 +14,12 @@ # limitations under the License. -from .. 
import unittest +from mock import Mock from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache -from mock import Mock +from .. import unittest class LruCacheTestCase(unittest.TestCase): diff --git a/tests/util/test_rwlock.py b/tests/util/test_rwlock.py index 1d745ae1a7..24194e3b25 100644 --- a/tests/util/test_rwlock.py +++ b/tests/util/test_rwlock.py @@ -14,10 +14,10 @@ # limitations under the License. -from tests import unittest - from synapse.util.async import ReadWriteLock +from tests import unittest + class ReadWriteLockTestCase(unittest.TestCase): diff --git a/tests/util/test_snapshot_cache.py b/tests/util/test_snapshot_cache.py index d3a8630c2f..0f5b32fcc0 100644 --- a/tests/util/test_snapshot_cache.py +++ b/tests/util/test_snapshot_cache.py @@ -14,10 +14,11 @@ # limitations under the License. -from .. import unittest +from twisted.internet.defer import Deferred from synapse.util.caches.snapshot_cache import SnapshotCache -from twisted.internet.defer import Deferred + +from .. import unittest class SnapshotCacheTestCase(unittest.TestCase): diff --git a/tests/util/test_stream_change_cache.py b/tests/util/test_stream_change_cache.py index 67ece166c7..e3897c0d19 100644 --- a/tests/util/test_stream_change_cache.py +++ b/tests/util/test_stream_change_cache.py @@ -1,8 +1,9 @@ -from tests import unittest from mock import patch from synapse.util.caches.stream_change_cache import StreamChangeCache +from tests import unittest + class StreamChangeCacheTests(unittest.TestCase): """ diff --git a/tests/util/test_treecache.py b/tests/util/test_treecache.py index 7ab578a185..a5f2261208 100644 --- a/tests/util/test_treecache.py +++ b/tests/util/test_treecache.py @@ -14,10 +14,10 @@ # limitations under the License. -from .. import unittest - from synapse.util.caches.treecache import TreeCache +from .. import unittest + class TreeCacheTestCase(unittest.TestCase): def test_get_set_onelevel(self): diff --git a/tests/util/test_wheel_timer.py b/tests/util/test_wheel_timer.py index fdb24a48b0..03201a4d9b 100644 --- a/tests/util/test_wheel_timer.py +++ b/tests/util/test_wheel_timer.py @@ -13,10 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .. import unittest - from synapse.util.wheel_timer import WheelTimer +from .. import unittest + class WheelTimerTestCase(unittest.TestCase): def test_single_insert_fetch(self): diff --git a/tests/utils.py b/tests/utils.py index 189fd2711c..6adbdbfca1 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -15,9 +15,10 @@ import hashlib from inspect import getcallargs -from six.moves.urllib import parse as urlparse from mock import Mock, patch +from six.moves.urllib import parse as urlparse + from twisted.internet import defer, reactor from synapse.api.errors import CodeMessageException, cs_error -- cgit 1.5.1 From 55370331da54c46c04253b009865097fe9e95191 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 10 Jul 2018 13:56:07 +0100 Subject: Refactor logcontext resource usage tracking (#3501) Factor out the resource usage tracking out to a separate object, which can be passed around and copied independently of the logcontext itself. 
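The shape of the refactor, in miniature: the resource counters become a value object that can be snapshotted, copied, and diffed with ordinary arithmetic, instead of being unpacked from the logcontext field by field. A standalone sketch of the pattern, reduced to two counters (UsageSnapshot is an illustrative name; the real class introduced below is ContextResourceUsage):

    class UsageSnapshot(object):
        """Value object for resource counters; supports copy and arithmetic."""

        __slots__ = ["ru_utime", "ru_stime"]

        def __init__(self, ru_utime=0.0, ru_stime=0.0):
            self.ru_utime = ru_utime
            self.ru_stime = ru_stime

        def copy(self):
            return UsageSnapshot(self.ru_utime, self.ru_stime)

        def __sub__(self, other):
            # usage accrued between two snapshots
            return UsageSnapshot(
                self.ru_utime - other.ru_utime,
                self.ru_stime - other.ru_stime,
            )

    # e.g. metrics code takes a snapshot at the start of a request and
    # later diffs the latest snapshot against it:
    start = UsageSnapshot(ru_utime=1.0, ru_stime=0.5)
    latest = UsageSnapshot(ru_utime=1.7, ru_stime=0.9)
    diff = latest - start  # diff.ru_utime ~= 0.7, diff.ru_stime ~= 0.4
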
--- changelog.d/3501.misc | 0 synapse/http/request_metrics.py | 83 +++++------------------ synapse/http/site.py | 24 +++---- synapse/util/logcontext.py | 144 +++++++++++++++++++++++++++++++--------- synapse/util/metrics.py | 25 +++---- 5 files changed, 146 insertions(+), 130 deletions(-) create mode 100644 changelog.d/3501.misc (limited to 'synapse/util') diff --git a/changelog.d/3501.misc b/changelog.d/3501.misc new file mode 100644 index 0000000000..e69de29bb2 diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index bcaa2a9ecc..f24b4b949c 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -149,7 +149,9 @@ class RequestMetrics(object): self.name = name self.method = method - self._request_stats = _RequestStats.from_context(self.start_context) + # _request_stats records resource usage that we have already added + # to the "in flight" metrics. + self._request_stats = self.start_context.get_resource_usage() _in_flight_requests.add(self) @@ -177,18 +179,22 @@ class RequestMetrics(object): time_sec - self.start ) - ru_utime, ru_stime = context.get_resource_usage() + resource_usage = context.get_resource_usage() - response_ru_utime.labels(request.method, self.name, tag).inc(ru_utime) - response_ru_stime.labels(request.method, self.name, tag).inc(ru_stime) + response_ru_utime.labels(request.method, self.name, tag).inc( + resource_usage.ru_utime, + ) + response_ru_stime.labels(request.method, self.name, tag).inc( + resource_usage.ru_stime, + ) response_db_txn_count.labels(request.method, self.name, tag).inc( - context.db_txn_count + resource_usage.db_txn_count ) response_db_txn_duration.labels(request.method, self.name, tag).inc( - context.db_txn_duration_sec + resource_usage.db_txn_duration_sec ) response_db_sched_duration.labels(request.method, self.name, tag).inc( - context.db_sched_duration_sec + resource_usage.db_sched_duration_sec ) response_size.labels(request.method, self.name, tag).inc(request.sentLength) @@ -201,7 +207,10 @@ class RequestMetrics(object): def update_metrics(self): """Updates the in flight metrics with values from this request. """ - diff = self._request_stats.update(self.start_context) + new_stats = self.start_context.get_resource_usage() + + diff = new_stats - self._request_stats + self._request_stats = new_stats in_flight_requests_ru_utime.labels(self.method, self.name).inc(diff.ru_utime) in_flight_requests_ru_stime.labels(self.method, self.name).inc(diff.ru_stime) @@ -217,61 +226,3 @@ class RequestMetrics(object): in_flight_requests_db_sched_duration.labels(self.method, self.name).inc( diff.db_sched_duration_sec ) - - -class _RequestStats(object): - """Keeps tracks of various metrics for an in flight request. - """ - - __slots__ = [ - "ru_utime", - "ru_stime", - "db_txn_count", - "db_txn_duration_sec", - "db_sched_duration_sec", - ] - - def __init__( - self, ru_utime, ru_stime, db_txn_count, db_txn_duration_sec, db_sched_duration_sec - ): - self.ru_utime = ru_utime - self.ru_stime = ru_stime - self.db_txn_count = db_txn_count - self.db_txn_duration_sec = db_txn_duration_sec - self.db_sched_duration_sec = db_sched_duration_sec - - @staticmethod - def from_context(context): - ru_utime, ru_stime = context.get_resource_usage() - - return _RequestStats( - ru_utime, ru_stime, - context.db_txn_count, - context.db_txn_duration_sec, - context.db_sched_duration_sec, - ) - - def update(self, context): - """Updates the current values and returns the difference between the - old and new values. 
- - Returns: - _RequestStats: The difference between the old and new values - """ - new = _RequestStats.from_context(context) - - diff = _RequestStats( - new.ru_utime - self.ru_utime, - new.ru_stime - self.ru_stime, - new.db_txn_count - self.db_txn_count, - new.db_txn_duration_sec - self.db_txn_duration_sec, - new.db_sched_duration_sec - self.db_sched_duration_sec, - ) - - self.ru_utime = new.ru_utime - self.ru_stime = new.ru_stime - self.db_txn_count = new.db_txn_count - self.db_txn_duration_sec = new.db_txn_duration_sec - self.db_sched_duration_sec = new.db_sched_duration_sec - - return diff diff --git a/synapse/http/site.py b/synapse/http/site.py index fc954e343c..21e26f9c5e 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -20,7 +20,7 @@ from twisted.web.server import Request, Site from synapse.http import redact_uri from synapse.http.request_metrics import RequestMetrics -from synapse.util.logcontext import LoggingContext +from synapse.util.logcontext import LoggingContext, ContextResourceUsage logger = logging.getLogger(__name__) @@ -95,15 +95,9 @@ class SynapseRequest(Request): def _finished_processing(self): try: context = LoggingContext.current_context() - ru_utime, ru_stime = context.get_resource_usage() - db_txn_count = context.db_txn_count - db_txn_duration_sec = context.db_txn_duration_sec - db_sched_duration_sec = context.db_sched_duration_sec - evt_db_fetch_count = context.evt_db_fetch_count + usage = context.get_resource_usage() except Exception: - ru_utime, ru_stime = (0, 0) - db_txn_count, db_txn_duration_sec = (0, 0) - evt_db_fetch_count = 0 + usage = ContextResourceUsage() end_time = time.time() @@ -130,18 +124,18 @@ class SynapseRequest(Request): self.site.site_tag, authenticated_entity, end_time - self.start_time, - ru_utime, - ru_stime, - db_sched_duration_sec, - db_txn_duration_sec, - int(db_txn_count), + usage.ru_utime, + usage.ru_stime, + usage.db_sched_duration_sec, + usage.db_txn_duration_sec, + int(usage.db_txn_count), self.sentLength, self.code, self.method, self.get_redacted_uri(), self.clientproto, user_agent, - evt_db_fetch_count, + usage.evt_db_fetch_count, ) try: diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index fe9288b031..44a0a56818 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -49,6 +49,90 @@ except Exception: return None +class ContextResourceUsage(object): + """Object for tracking the resources used by a log context + + Attributes: + ru_utime (float): user CPU time (in seconds) + ru_stime (float): system CPU time (in seconds) + db_txn_count (int): number of database transactions done + db_sched_duration_sec (float): amount of time spent waiting for a + database connection + db_txn_duration_sec (float): amount of time spent doing database + transactions (excluding scheduling time) + evt_db_fetch_count (int): number of events requested from the database + """ + + __slots__ = [ + "ru_stime", "ru_utime", + "db_txn_count", "db_txn_duration_sec", "db_sched_duration_sec", + "evt_db_fetch_count", + ] + + def __init__(self, copy_from=None): + """Create a new ContextResourceUsage + + Args: + copy_from (ContextResourceUsage|None): if not None, an object to + copy stats from + """ + if copy_from is None: + self.reset() + else: + self.ru_utime = copy_from.ru_utime + self.ru_stime = copy_from.ru_stime + self.db_txn_count = copy_from.db_txn_count + + self.db_txn_duration_sec = copy_from.db_txn_duration_sec + self.db_sched_duration_sec = copy_from.db_sched_duration_sec + self.evt_db_fetch_count = 
copy_from.evt_db_fetch_count + + def copy(self): + return ContextResourceUsage(copy_from=self) + + def reset(self): + self.ru_stime = 0. + self.ru_utime = 0. + self.db_txn_count = 0 + + self.db_txn_duration_sec = 0 + self.db_sched_duration_sec = 0 + self.evt_db_fetch_count = 0 + + def __iadd__(self, other): + """Add another ContextResourceUsage's stats to this one's. + + Args: + other (ContextResourceUsage): the other resource usage object + """ + self.ru_utime += other.ru_utime + self.ru_stime += other.ru_stime + self.db_txn_count += other.db_txn_count + self.db_txn_duration_sec += other.db_txn_duration_sec + self.db_sched_duration_sec += other.db_sched_duration_sec + self.evt_db_fetch_count += other.evt_db_fetch_count + return self + + def __isub__(self, other): + self.ru_utime -= other.ru_utime + self.ru_stime -= other.ru_stime + self.db_txn_count -= other.db_txn_count + self.db_txn_duration_sec -= other.db_txn_duration_sec + self.db_sched_duration_sec -= other.db_sched_duration_sec + self.evt_db_fetch_count -= other.evt_db_fetch_count + return self + + def __add__(self, other): + res = ContextResourceUsage(copy_from=self) + res += other + return res + + def __sub__(self, other): + res = ContextResourceUsage(copy_from=self) + res -= other + return res + + class LoggingContext(object): """Additional context for log formatting. Contexts are scoped within a "with" block. @@ -58,9 +142,8 @@ class LoggingContext(object): """ __slots__ = [ - "previous_context", "name", "ru_stime", "ru_utime", - "db_txn_count", "db_txn_duration_sec", "db_sched_duration_sec", - "evt_db_fetch_count", + "previous_context", "name", + "_resource_usage", "usage_start", "main_thread", "alive", "request", "tag", @@ -103,18 +186,9 @@ class LoggingContext(object): def __init__(self, name=None): self.previous_context = LoggingContext.current_context() self.name = name - self.ru_stime = 0. - self.ru_utime = 0. - self.db_txn_count = 0 - # sec spent waiting for db txns, excluding scheduling time - self.db_txn_duration_sec = 0 - - # sec spent waiting for db txns to be scheduled - self.db_sched_duration_sec = 0 - - # number of events this thread has fetched from the db - self.evt_db_fetch_count = 0 + # track the resources used by this context so far + self._resource_usage = ContextResourceUsage() # If alive has the thread resource usage when the logcontext last # became active. @@ -207,39 +281,43 @@ class LoggingContext(object): logger.warning("Stopped logcontext %s on different thread", self) return - # When we stop, let's record the resource used since we started - if self.usage_start: - usage_end = get_thread_resource_usage() + # When we stop, let's record the cpu used since we started + if not self.usage_start: + logger.warning( + "Called stop on logcontext %s without calling start", self, + ) + return + + usage_end = get_thread_resource_usage() - self.ru_utime += usage_end.ru_utime - self.usage_start.ru_utime - self.ru_stime += usage_end.ru_stime - self.usage_start.ru_stime + self._resource_usage.ru_utime += usage_end.ru_utime - self.usage_start.ru_utime + self._resource_usage.ru_stime += usage_end.ru_stime - self.usage_start.ru_stime - self.usage_start = None - else: - logger.warning("Called stop on logcontext %s without calling start", self) + self.usage_start = None def get_resource_usage(self): - """Get CPU time used by this logcontext so far. + """Get resources used by this logcontext so far. 
Returns: - tuple[float, float]: The user and system CPU usage in seconds + ContextResourceUsage: a *copy* of the object tracking resource + usage so far """ - ru_utime = self.ru_utime - ru_stime = self.ru_stime + # we always return a copy, for consistency + res = self._resource_usage.copy() # If we are on the correct thread and we're currently running then we # can include resource usage so far. is_main_thread = threading.current_thread() is self.main_thread if self.alive and self.usage_start and is_main_thread: current = get_thread_resource_usage() - ru_utime += current.ru_utime - self.usage_start.ru_utime - ru_stime += current.ru_stime - self.usage_start.ru_stime + res.ru_utime += current.ru_utime - self.usage_start.ru_utime + res.ru_stime += current.ru_stime - self.usage_start.ru_stime - return ru_utime, ru_stime + return res def add_database_transaction(self, duration_sec): - self.db_txn_count += 1 - self.db_txn_duration_sec += duration_sec + self._resource_usage.db_txn_count += 1 + self._resource_usage.db_txn_duration_sec += duration_sec def add_database_scheduled(self, sched_sec): """Record a use of the database pool @@ -248,7 +326,7 @@ class LoggingContext(object): sched_sec (float): number of seconds it took us to get a connection """ - self.db_sched_duration_sec += sched_sec + self._resource_usage.db_sched_duration_sec += sched_sec def record_event_fetch(self, event_count): """Record a number of events being fetched from the db @@ -256,7 +334,7 @@ class LoggingContext(object): Args: event_count (int): number of events being fetched """ - self.evt_db_fetch_count += event_count + self._resource_usage.evt_db_fetch_count += event_count class LoggingContextFilter(logging.Filter): diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 63bc64c642..6ba7107896 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -60,10 +60,9 @@ def measure_func(name): class Measure(object): __slots__ = [ - "clock", "name", "start_context", "start", "new_context", "ru_utime", - "ru_stime", - "db_txn_count", "db_txn_duration_sec", "db_sched_duration_sec", + "clock", "name", "start_context", "start", "created_context", + "start_usage", ] def __init__(self, clock, name): @@ -81,10 +80,7 @@ class Measure(object): self.start_context.__enter__() self.created_context = True - self.ru_utime, self.ru_stime = self.start_context.get_resource_usage() - self.db_txn_count = self.start_context.db_txn_count - self.db_txn_duration_sec = self.start_context.db_txn_duration_sec - self.db_sched_duration_sec = self.start_context.db_sched_duration_sec + self.start_usage = self.start_context.get_resource_usage() def __exit__(self, exc_type, exc_val, exc_tb): if isinstance(exc_type, Exception) or not self.start_context: @@ -108,15 +104,12 @@ class Measure(object): logger.warn("Expected context. 
(%r)", self.name) return - ru_utime, ru_stime = context.get_resource_usage() - - block_ru_utime.labels(self.name).inc(ru_utime - self.ru_utime) - block_ru_stime.labels(self.name).inc(ru_stime - self.ru_stime) - block_db_txn_count.labels(self.name).inc(context.db_txn_count - self.db_txn_count) - block_db_txn_duration.labels(self.name).inc( - context.db_txn_duration_sec - self.db_txn_duration_sec) - block_db_sched_duration.labels(self.name).inc( - context.db_sched_duration_sec - self.db_sched_duration_sec) + usage = context.get_resource_usage() - self.start_usage + block_ru_utime.labels(self.name).inc(usage.ru_utime) + block_ru_stime.labels(self.name).inc(usage.ru_stime) + block_db_txn_count.labels(self.name).inc(usage.db_txn_count) + block_db_txn_duration.labels(self.name).inc(usage.db_txn_duration_sec) + block_db_sched_duration.labels(self.name).inc(usage.db_sched_duration_sec) if self.created_context: self.start_context.__exit__(exc_type, exc_val, exc_tb) -- cgit 1.5.1 From c3c29aa19625324ac9a7a8ebcdd58a9c0b457f74 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 10 Jul 2018 16:12:36 +0100 Subject: Attempt to include db threads in cpu usage stats (#3496) Let's try to include time spent in the DB threads in the per-request/block cpu usage metrics. --- changelog.d/3496.feature | 1 + synapse/storage/_base.py | 32 +++++++++++++++----------------- synapse/storage/events_worker.py | 3 ++- synapse/util/logcontext.py | 23 +++++++++++++++++++++-- 4 files changed, 39 insertions(+), 20 deletions(-) create mode 100644 changelog.d/3496.feature (limited to 'synapse/util') diff --git a/changelog.d/3496.feature b/changelog.d/3496.feature new file mode 100644 index 0000000000..6a06a7e755 --- /dev/null +++ b/changelog.d/3496.feature @@ -0,0 +1 @@ +Include CPU time from database threads in request/block metrics. 
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 1fd5d8f162..98dde77431 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -220,7 +220,7 @@ class SQLBaseStore(object): self._clock.looping_call(loop, 10000) def _new_transaction(self, conn, desc, after_callbacks, exception_callbacks, - logging_context, func, *args, **kwargs): + func, *args, **kwargs): start = time.time() txn_id = self._TXN_ID @@ -284,8 +284,7 @@ class SQLBaseStore(object): end = time.time() duration = end - start - if logging_context is not None: - logging_context.add_database_transaction(duration) + LoggingContext.current_context().add_database_transaction(duration) transaction_logger.debug("[TXN END] {%s} %f sec", name, duration) @@ -309,19 +308,15 @@ class SQLBaseStore(object): Returns: Deferred: The result of func """ - current_context = LoggingContext.current_context() - after_callbacks = [] exception_callbacks = [] - def inner_func(conn, *args, **kwargs): - return self._new_transaction( - conn, desc, after_callbacks, exception_callbacks, current_context, - func, *args, **kwargs - ) - try: - result = yield self.runWithConnection(inner_func, *args, **kwargs) + result = yield self.runWithConnection( + self._new_transaction, + desc, after_callbacks, exception_callbacks, func, + *args, **kwargs + ) for after_callback, after_args, after_kwargs in after_callbacks: after_callback(*after_args, **after_kwargs) @@ -346,22 +341,25 @@ class SQLBaseStore(object): Returns: Deferred: The result of func """ - current_context = LoggingContext.current_context() + parent_context = LoggingContext.current_context() + if parent_context == LoggingContext.sentinel: + logger.warn( + "Running db txn from sentinel context: metrics will be lost", + ) + parent_context = None start_time = time.time() def inner_func(conn, *args, **kwargs): - with LoggingContext("runWithConnection") as context: + with LoggingContext("runWithConnection", parent_context) as context: sched_duration_sec = time.time() - start_time sql_scheduling_timer.observe(sched_duration_sec) - current_context.add_database_scheduled(sched_duration_sec) + context.add_database_scheduled(sched_duration_sec) if self.database_engine.is_connection_closed(conn): logger.debug("Reconnecting closed database connection") conn.reconnect() - current_context.copy_to(context) - return func(conn, *args, **kwargs) with PreserveLoggingContext(): diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index fa2659403d..67433606c6 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -261,7 +261,8 @@ class EventsWorkerStore(SQLBaseStore): ] rows = self._new_transaction( - conn, "do_fetch", [], [], None, self._fetch_event_rows, event_ids + conn, "do_fetch", [], [], + self._fetch_event_rows, event_ids, ) row_dict = { diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 44a0a56818..f6c7175f74 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -137,12 +137,18 @@ class LoggingContext(object): """Additional context for log formatting. Contexts are scoped within a "with" block. + If a parent is given when creating a new context, then: + - logging fields are copied from the parent to the new context on entry + - when the new context exits, the cpu usage stats are copied from the + child to the parent + Args: name (str): Name for the context for debugging. 
+ parent_context (LoggingContext|None): The parent of the new context """ __slots__ = [ - "previous_context", "name", + "previous_context", "name", "parent_context", "_resource_usage", "usage_start", "main_thread", "alive", @@ -183,7 +189,7 @@ class LoggingContext(object): sentinel = Sentinel() - def __init__(self, name=None): + def __init__(self, name=None, parent_context=None): self.previous_context = LoggingContext.current_context() self.name = name @@ -199,6 +205,8 @@ class LoggingContext(object): self.tag = "" self.alive = True + self.parent_context = parent_context + def __str__(self): return "%s@%x" % (self.name, id(self)) @@ -236,6 +244,10 @@ class LoggingContext(object): self.previous_context, old_context ) self.alive = True + + if self.parent_context is not None: + self.parent_context.copy_to(self) + return self def __exit__(self, type, value, traceback): @@ -257,6 +269,13 @@ class LoggingContext(object): self.previous_context = None self.alive = False + # if we have a parent, pass our CPU usage stats on + if self.parent_context is not None: + self.parent_context._resource_usage += self._resource_usage + + # reset them in case we get entered again + self._resource_usage.reset() + def copy_to(self, record): """Copy logging fields from this context to a log record or another LoggingContext -- cgit 1.5.1 From fa5c2bc082185580e7d65a8759a8b5213933a137 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 12 Jul 2018 11:37:44 +0100 Subject: Reduce set building in get_entities_changed This line shows up as about 5% of cpu time on a synchrotron: not_known_entities = set(entities) - set(self._entity_to_key) Presumably the problem here is that _entity_to_key can be largeish, and building a set for its keys every time this function is called is slow. Here we rewrite the logic to avoid building so many sets. --- synapse/util/caches/stream_change_cache.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 8637867c6d..a1f8ff8f10 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -74,14 +74,18 @@ class StreamChangeCache(object): assert type(stream_pos) is int if stream_pos >= self._earliest_known_stream_pos: - not_known_entities = set(entities) - set(self._entity_to_key) - - result = ( - {self._cache[k] for k in self._cache.islice( - start=self._cache.bisect_right(stream_pos))} - .intersection(entities) - .union(not_known_entities) - ) + changed_entities = { + self._cache[k] for k in self._cache.islice( + start=self._cache.bisect_right(stream_pos), + ) + } + + # we need to include entities which we don't know about, as well as + # those which are known to have changed since the stream pos. + result = { + e for e in entities + if e in changed_entities or e not in self._entity_to_key + } self.metrics.inc_hits() else: -- cgit 1.5.1 From 77b692e65dfe5da79e9f69f4a24bde1c768a998c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 13 Jul 2018 15:26:10 +0100 Subject: Don't return unknown entities in get_entities_changed The stream cache keeps track of all entities that have changed since a particular stream position, so get_entities_changed does not need to return unknown entites when given a larger stream position. This makes it consistent with the behaviour of has_entity_changed. 
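The invariant being relied on, in miniature (a standalone toy, not the cache's real API): the cache records every entity that changed after its earliest known position, so within that window "absent from the cache" means "has not changed":

    def has_entity_changed(last_changed, earliest_known_pos, entity, stream_pos):
        """last_changed: dict mapping entity -> stream pos of its last change,
        complete for all changes after earliest_known_pos."""
        if stream_pos < earliest_known_pos:
            return True  # window doesn't reach back far enough; assume changed
        # within the window the cache is authoritative, including for
        # entities it has never seen (they simply haven't changed)
        return last_changed.get(entity, -1) > stream_pos

    assert not has_entity_changed({"@a:hs": 7}, 5, "@unknown:hs", 6)
    assert has_entity_changed({"@a:hs": 7}, 5, "@a:hs", 6)
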
--- synapse/util/caches/stream_change_cache.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index a1f8ff8f10..258655349b 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -74,19 +74,12 @@ class StreamChangeCache(object): assert type(stream_pos) is int if stream_pos >= self._earliest_known_stream_pos: - changed_entities = { + result = { self._cache[k] for k in self._cache.islice( start=self._cache.bisect_right(stream_pos), ) } - # we need to include entities which we don't know about, as well as - # those which are known to have changed since the stream pos. - result = { - e for e in entities - if e in changed_entities or e not in self._entity_to_key - } - self.metrics.inc_hits() else: result = set(entities) -- cgit 1.5.1 From 33b40d0a2590a6d875548c27408c2d35d3269f33 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 13 Jul 2018 16:19:40 +0100 Subject: Make FederationRateLimiter queue requests properly popitem removes the *most recent* item by default [1]. We want the oldest. Fixes #3524 [1]: https://docs.python.org/2/library/collections.html#collections.OrderedDict.popitem --- synapse/util/ratelimitutils.py | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 5ac33b2132..7deb38f2a7 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -92,13 +92,22 @@ class _PerHostRatelimiter(object): self.window_size = window_size self.sleep_limit = sleep_limit - self.sleep_msec = sleep_msec + self.sleep_sec = sleep_msec / 1000.0 self.reject_limit = reject_limit self.concurrent_requests = concurrent_requests + # request_id objects for requests which have been slept self.sleeping_requests = set() + + # map from request_id object to Deferred for requests which are ready + # for processing but have been queued self.ready_request_queue = collections.OrderedDict() + + # request id objects for requests which are in progress self.current_processing = set() + + # times at which we have recently (within the last window_size ms) + # received requests. self.request_times = [] @contextlib.contextmanager @@ -117,11 +126,15 @@ class _PerHostRatelimiter(object): def _on_enter(self, request_id): time_now = self.clock.time_msec() + + # remove any entries from request_times which aren't within the window self.request_times[:] = [ r for r in self.request_times if time_now - r < self.window_size ] + # reject the request if we already have too many queued up (either + # sleeping or in the ready queue). 
queue_size = len(self.ready_request_queue) + len(self.sleeping_requests) if queue_size > self.reject_limit: raise LimitExceededError( @@ -134,9 +147,13 @@ class _PerHostRatelimiter(object): def queue_request(): if len(self.current_processing) > self.concurrent_requests: - logger.debug("Ratelimit [%s]: Queue req", id(request_id)) queue_defer = defer.Deferred() self.ready_request_queue[request_id] = queue_defer + logger.info( + "Ratelimiter: queueing request (queue now %i items)", + len(self.ready_request_queue), + ) + return queue_defer else: return defer.succeed(None) @@ -148,10 +165,9 @@ class _PerHostRatelimiter(object): if len(self.request_times) > self.sleep_limit: logger.debug( - "Ratelimit [%s]: sleeping req", - id(request_id), + "Ratelimiter: sleeping request for %f sec", self.sleep_sec, ) - ret_defer = run_in_background(self.clock.sleep, self.sleep_msec / 1000.0) + ret_defer = run_in_background(self.clock.sleep, self.sleep_sec) self.sleeping_requests.add(request_id) @@ -200,11 +216,8 @@ class _PerHostRatelimiter(object): ) self.current_processing.discard(request_id) try: - request_id, deferred = self.ready_request_queue.popitem() - - # XXX: why do we do the following? the on_start callback above will - # do it for us. - self.current_processing.add(request_id) + # start processing the next item on the queue. + _, deferred = self.ready_request_queue.popitem(last=False) with PreserveLoggingContext(): deferred.callback(None) -- cgit 1.5.1 From 547b1355d3747b267db3e21aefd143382f49f4ec Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 17 Jul 2018 10:27:51 +0100 Subject: Fix perf regression in PR #3530 The get_entities_changed function was changed to return all changed entities since the given stream position, rather than only those changed from a given list of entities. This resulted in the function incorrectly returning large numbers of entities that, for example, caused large increases in database usage. 
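The fix, reduced to its essentials: keep building the set of everything changed since the position, but filter it down to the entities the caller actually asked about, rather than returning the whole changed set. A standalone sketch:

    changed_entities = {"@a:hs", "@b:hs", "@c:hs"}  # all changes since stream_pos
    entities = ["@b:hs", "@d:hs"]                   # what the caller asked about

    # the regressed version returned changed_entities wholesale; the fix
    # intersects it with the requested entities again:
    result = {e for e in entities if e in changed_entities}
    assert result == {"@b:hs"}

The follow-up commit just below performs the same filtering with set.intersection, which does the membership loop in C rather than in a Python comprehension.
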
--- synapse/util/caches/stream_change_cache.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'synapse/util') diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 258655349b..c1e76b1a0e 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -74,12 +74,17 @@ class StreamChangeCache(object): assert type(stream_pos) is int if stream_pos >= self._earliest_known_stream_pos: - result = { + changed_entities = { self._cache[k] for k in self._cache.islice( start=self._cache.bisect_right(stream_pos), ) } + result = { + e for e in entities + if e in changed_entities + } + self.metrics.inc_hits() else: result = set(entities) -- cgit 1.5.1 From b2aa05a8d6b1b2fb9e1efcc6fb03f1b49bc1be1d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 17 Jul 2018 11:07:04 +0100 Subject: Use efficient .intersection --- synapse/util/caches/stream_change_cache.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index c1e76b1a0e..f2bde74dc5 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -80,10 +80,7 @@ class StreamChangeCache(object): ) } - result = { - e for e in entities - if e in changed_entities - } + result = changed_entities.intersection(entities) self.metrics.inc_hits() else: -- cgit 1.5.1 From 667fba68f3ca808f48143a2a739a54665b0162c6 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 18 Jul 2018 14:35:24 +0100 Subject: Run things as background processes This fixes #3518, and ensures that we get useful logs and metrics for lots of things that happen in the background. (There are certainly more things that happen in the background; these are just the common ones I've found running a single-process synapse locally). --- synapse/federation/transaction_queue.py | 15 ++++++--------- synapse/storage/background_updates.py | 10 ++++++++-- synapse/storage/client_ips.py | 15 +++++++++++---- synapse/storage/events.py | 10 ++++------ synapse/storage/events_worker.py | 10 ++++++---- synapse/util/caches/expiringcache.py | 6 +++++- synapse/util/distributor.py | 4 ++++ 7 files changed, 44 insertions(+), 26 deletions(-) (limited to 'synapse/util') diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 5c5a73b73c..6996d6b695 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -168,7 +168,7 @@ class TransactionQueue(object): # fire off a processing loop in the background run_as_background_process( - "process_transaction_queue", + "process_event_queue_for_federation", self._process_event_queue_loop, ) @@ -434,14 +434,11 @@ class TransactionQueue(object): logger.debug("TX [%s] Starting transaction loop", destination) - # Drop the logcontext before starting the transaction. It doesn't - # really make sense to log all the outbound transactions against - # whatever path led us to this point: that's pretty arbitrary really. 
- # - # (this also means we can fire off _perform_transaction without - # yielding) - with logcontext.PreserveLoggingContext(): - self._transaction_transmission_loop(destination) + run_as_background_process( + "federation_transaction_transmission_loop", + self._transaction_transmission_loop, + destination, + ) @defer.inlineCallbacks def _transaction_transmission_loop(self, destination): diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index dc9eca7d15..5fe1ca2de7 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -19,6 +19,8 @@ from canonicaljson import json from twisted.internet import defer +from synapse.metrics.background_process_metrics import run_as_background_process + from . import engines from ._base import SQLBaseStore @@ -87,10 +89,14 @@ class BackgroundUpdateStore(SQLBaseStore): self._background_update_handlers = {} self._all_done = False - @defer.inlineCallbacks def start_doing_background_updates(self): - logger.info("Starting background schema updates") + run_as_background_process( + "background_updates", self._run_background_updates, + ) + @defer.inlineCallbacks + def _run_background_updates(self): + logger.info("Starting background schema updates") while True: yield self.hs.get_clock().sleep( self.BACKGROUND_UPDATE_INTERVAL_MS / 1000.) diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py index b78eda3413..77ae10da3d 100644 --- a/synapse/storage/client_ips.py +++ b/synapse/storage/client_ips.py @@ -19,6 +19,7 @@ from six import iteritems from twisted.internet import defer +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.caches import CACHE_SIZE_FACTOR from . import background_updates @@ -93,10 +94,16 @@ class ClientIpStore(background_updates.BackgroundUpdateStore): self._batch_row_update[key] = (user_agent, device_id, now) def _update_client_ips_batch(self): - to_update = self._batch_row_update - self._batch_row_update = {} - return self.runInteraction( - "_update_client_ips_batch", self._update_client_ips_batch_txn, to_update + def update(): + to_update = self._batch_row_update + self._batch_row_update = {} + return self.runInteraction( + "_update_client_ips_batch", self._update_client_ips_batch_txn, + to_update, + ) + + run_as_background_process( + "update_client_ips", update, ) def _update_client_ips_batch_txn(self, txn, to_update): diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 2aaab0d02c..4ff0fdc4ab 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -33,12 +33,13 @@ from synapse.api.errors import SynapseError # these are only included to make the type annotations work from synapse.events import EventBase # noqa: F401 from synapse.events.snapshot import EventContext # noqa: F401 +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.events_worker import EventsWorkerStore from synapse.types import RoomStreamToken, get_domain_from_id from synapse.util.async import ObservableDeferred from synapse.util.caches.descriptors import cached, cachedInlineCallbacks from synapse.util.frozenutils import frozendict_json_encoder -from synapse.util.logcontext import PreserveLoggingContext, make_deferred_yieldable +from synapse.util.logcontext import make_deferred_yieldable from synapse.util.logutils import log_function from synapse.util.metrics import Measure @@ -155,11 +156,8 @@ class _EventPeristenceQueue(object): 
self._event_persist_queues[room_id] = queue self._currently_persisting_rooms.discard(room_id) - # set handle_queue_loop off on the background. We don't want to - # attribute work done in it to the current request, so we drop the - # logcontext altogether. - with PreserveLoggingContext(): - handle_queue_loop() + # set handle_queue_loop off in the background + run_as_background_process("persist_events", handle_queue_loop) def _get_drainining_queue(self, room_id): queue = self._event_persist_queues.setdefault(room_id, deque()) diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 67433606c6..f28239a808 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -25,6 +25,7 @@ from synapse.events import EventBase # noqa: F401 from synapse.events import FrozenEvent from synapse.events.snapshot import EventContext # noqa: F401 from synapse.events.utils import prune_event +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.logcontext import ( LoggingContext, PreserveLoggingContext, @@ -322,10 +323,11 @@ class EventsWorkerStore(SQLBaseStore): should_start = False if should_start: - with PreserveLoggingContext(): - self.runWithConnection( - self._do_fetch - ) + run_as_background_process( + "fetch_events", + self.runWithConnection, + self._do_fetch, + ) logger.debug("Loading %d events", len(events)) with PreserveLoggingContext(): diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 4abca91f6d..465adc54a8 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -16,6 +16,7 @@ import logging from collections import OrderedDict +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.caches import register_cache logger = logging.getLogger(__name__) @@ -63,7 +64,10 @@ class ExpiringCache(object): return def f(): - self._prune_cache() + run_as_background_process( + "prune_cache_%s" % self._cache_name, + self._prune_cache, + ) self._clock.looping_call(f, self._expiry_ms / 2) diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py index 734331caaa..d91ae400eb 100644 --- a/synapse/util/distributor.py +++ b/synapse/util/distributor.py @@ -75,6 +75,10 @@ class Distributor(object): self.pre_registration[name].append(observer) def fire(self, name, *args, **kwargs): + """Dispatches the given signal to the registered observers. + + Runs the observers as a background process. Does not return a deferred. + """ if name not in self.signals: raise KeyError("%r does not have a signal named %s" % (self, name)) -- cgit 1.5.1 From 8c69b735e3186875a8bf676241f990470fb7c592 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 18 Jul 2018 15:33:13 +0100 Subject: Make Distributor run its processes as a background process This is more involved than it might otherwise be, because the current implementation just drops its logcontexts and runs everything in the sentinel context. It turns out that we aren't actually using a bunch of the functionality here (notably suppress_failures and the fact that Distributor.fire returns a deferred), so the easiest way to fix this is actually by simplifying a bunch of code. 
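The net shape after the simplification, as a standalone toy (MiniSignal is an illustrative name, and this version is deliberately synchronous; the real Signal wraps each observer in run_in_background, and Distributor.fire wraps the whole dispatch in run_as_background_process): fire() returns nothing, and observer failures are logged rather than propagated:

    import logging

    logger = logging.getLogger(__name__)

    class MiniSignal(object):
        def __init__(self, name):
            self.name = name
            self.observers = []

        def observe(self, observer):
            self.observers.append(observer)

        def fire(self, *args, **kwargs):
            # fire-and-forget: callers get no deferred back, and a failing
            # observer cannot break the others
            for observer in self.observers:
                try:
                    observer(*args, **kwargs)
                except Exception:
                    logger.exception("%s signal observer failed", self.name)

    sig = MiniSignal("user_joined_room")
    sig.observe(lambda user, room_id: logger.info("%s joined %s", user, room_id))
    sig.fire("@alice:hs", "!room:hs")
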
--- synapse/util/distributor.py | 44 +++++++++++++++-------------------- tests/test_distributor.py | 56 ++++----------------------------------------- 2 files changed, 22 insertions(+), 78 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py index d91ae400eb..194da87639 100644 --- a/synapse/util/distributor.py +++ b/synapse/util/distributor.py @@ -17,20 +17,18 @@ import logging from twisted.internet import defer -from synapse.util import unwrapFirstError -from synapse.util.logcontext import PreserveLoggingContext +from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.util.logcontext import make_deferred_yieldable, run_in_background logger = logging.getLogger(__name__) def user_left_room(distributor, user, room_id): - with PreserveLoggingContext(): - distributor.fire("user_left_room", user=user, room_id=room_id) + distributor.fire("user_left_room", user=user, room_id=room_id) def user_joined_room(distributor, user, room_id): - with PreserveLoggingContext(): - distributor.fire("user_joined_room", user=user, room_id=room_id) + distributor.fire("user_joined_room", user=user, room_id=room_id) class Distributor(object): @@ -44,9 +42,7 @@ class Distributor(object): model will do for today. """ - def __init__(self, suppress_failures=True): - self.suppress_failures = suppress_failures - + def __init__(self): self.signals = {} self.pre_registration = {} @@ -56,7 +52,6 @@ class Distributor(object): self.signals[name] = Signal( name, - suppress_failures=self.suppress_failures, ) if name in self.pre_registration: @@ -82,7 +77,11 @@ class Distributor(object): if name not in self.signals: raise KeyError("%r does not have a signal named %s" % (self, name)) - return self.signals[name].fire(*args, **kwargs) + run_as_background_process( + name, + self.signals[name].fire, + *args, **kwargs + ) class Signal(object): @@ -95,9 +94,8 @@ class Signal(object): method into all of the observers. """ - def __init__(self, name, suppress_failures): + def __init__(self, name): self.name = name - self.suppress_failures = suppress_failures self.observers = [] def observe(self, observer): @@ -107,7 +105,6 @@ class Signal(object): Each observer callable may return a Deferred.""" self.observers.append(observer) - @defer.inlineCallbacks def fire(self, *args, **kwargs): """Invokes every callable in the observer list, passing in the args and kwargs. Exceptions thrown by observers are logged but ignored. 
It is @@ -125,22 +122,17 @@ class Signal(object): failure.type, failure.value, failure.getTracebackObject())) - if not self.suppress_failures: - return failure return defer.maybeDeferred(observer, *args, **kwargs).addErrback(eb) - with PreserveLoggingContext(): - deferreds = [ - do(observer) - for observer in self.observers - ] - - res = yield defer.gatherResults( - deferreds, consumeErrors=True - ).addErrback(unwrapFirstError) + deferreds = [ + run_in_background(do, o) + for o in self.observers + ] - defer.returnValue(res) + return make_deferred_yieldable(defer.gatherResults( + deferreds, consumeErrors=True, + )) def __repr__(self): return "" % (self.name,) diff --git a/tests/test_distributor.py b/tests/test_distributor.py index 04a88056f1..71d11cda77 100644 --- a/tests/test_distributor.py +++ b/tests/test_distributor.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,8 +16,6 @@ from mock import Mock, patch -from twisted.internet import defer - from synapse.util.distributor import Distributor from . import unittest @@ -27,38 +26,15 @@ class DistributorTestCase(unittest.TestCase): def setUp(self): self.dist = Distributor() - @defer.inlineCallbacks def test_signal_dispatch(self): self.dist.declare("alert") observer = Mock() self.dist.observe("alert", observer) - d = self.dist.fire("alert", 1, 2, 3) - yield d - self.assertTrue(d.called) + self.dist.fire("alert", 1, 2, 3) observer.assert_called_with(1, 2, 3) - @defer.inlineCallbacks - def test_signal_dispatch_deferred(self): - self.dist.declare("whine") - - d_inner = defer.Deferred() - - def observer(): - return d_inner - - self.dist.observe("whine", observer) - - d_outer = self.dist.fire("whine") - - self.assertFalse(d_outer.called) - - d_inner.callback(None) - yield d_outer - self.assertTrue(d_outer.called) - - @defer.inlineCallbacks def test_signal_catch(self): self.dist.declare("alarm") @@ -71,9 +47,7 @@ class DistributorTestCase(unittest.TestCase): with patch( "synapse.util.distributor.logger", spec=["warning"] ) as mock_logger: - d = self.dist.fire("alarm", "Go") - yield d - self.assertTrue(d.called) + self.dist.fire("alarm", "Go") observers[0].assert_called_once_with("Go") observers[1].assert_called_once_with("Go") @@ -83,34 +57,12 @@ class DistributorTestCase(unittest.TestCase): mock_logger.warning.call_args[0][0], str ) - @defer.inlineCallbacks - def test_signal_catch_no_suppress(self): - # Gut-wrenching - self.dist.suppress_failures = False - - self.dist.declare("whail") - - class MyException(Exception): - pass - - @defer.inlineCallbacks - def observer(): - raise MyException("Oopsie") - - self.dist.observe("whail", observer) - - d = self.dist.fire("whail") - - yield self.assertFailure(d, MyException) - self.dist.suppress_failures = True - - @defer.inlineCallbacks def test_signal_prereg(self): observer = Mock() self.dist.observe("flare", observer) self.dist.declare("flare") - yield self.dist.fire("flare", 4, 5) + self.dist.fire("flare", 4, 5) observer.assert_called_with(4, 5) -- cgit 1.5.1 From 95ccb6e2ec57f2150a697ea9cde030e8f78d6db9 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 19 Jul 2018 20:58:18 +1000 Subject: Don't spew errors because we can't save metrics (#3563) --- changelog.d/3548.bugfix | 1 + synapse/util/logcontext.py | 11 +++++++++++ synapse/util/metrics.py | 19 +++++++++++++------ 3 files changed, 25 
insertions(+), 6 deletions(-) create mode 100644 changelog.d/3548.bugfix (limited to 'synapse/util') diff --git a/changelog.d/3548.bugfix b/changelog.d/3548.bugfix new file mode 100644 index 0000000000..38dc3b1232 --- /dev/null +++ b/changelog.d/3548.bugfix @@ -0,0 +1 @@ +Catch failures saving metrics captured by Measure, and instead log the faulty metrics information for further analysis. diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index f6c7175f74..8dcae50b39 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -99,6 +99,17 @@ class ContextResourceUsage(object): self.db_sched_duration_sec = 0 self.evt_db_fetch_count = 0 + def __repr__(self): + return ("") % ( + self.ru_stime, + self.ru_utime, + self.db_txn_count, + self.db_txn_duration_sec, + self.db_sched_duration_sec, + self.evt_db_fetch_count,) + def __iadd__(self, other): """Add another ContextResourceUsage's stats to this one's. diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 6ba7107896..97f1267380 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -104,12 +104,19 @@ class Measure(object): logger.warn("Expected context. (%r)", self.name) return - usage = context.get_resource_usage() - self.start_usage - block_ru_utime.labels(self.name).inc(usage.ru_utime) - block_ru_stime.labels(self.name).inc(usage.ru_stime) - block_db_txn_count.labels(self.name).inc(usage.db_txn_count) - block_db_txn_duration.labels(self.name).inc(usage.db_txn_duration_sec) - block_db_sched_duration.labels(self.name).inc(usage.db_sched_duration_sec) + current = context.get_resource_usage() + usage = current - self.start_usage + try: + block_ru_utime.labels(self.name).inc(usage.ru_utime) + block_ru_stime.labels(self.name).inc(usage.ru_stime) + block_db_txn_count.labels(self.name).inc(usage.db_txn_count) + block_db_txn_duration.labels(self.name).inc(usage.db_txn_duration_sec) + block_db_sched_duration.labels(self.name).inc(usage.db_sched_duration_sec) + except ValueError: + logger.warn( + "Failed to save metrics! OLD: %r, NEW: %r", + self.start_usage, current + ) if self.created_context: self.start_context.__exit__(exc_type, exc_val, exc_tb) -- cgit 1.5.1 From d7275eecf3abcd5f7c645e5da47c7e7310350333 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 20 Jul 2018 12:37:12 +0100 Subject: Add a sleep to the Limiter to fix stack overflows. Fixes #3570 --- synapse/util/async.py | 23 ++++++++++++++++++++--- tests/util/test_limiter.py | 8 ++++---- 2 files changed, 24 insertions(+), 7 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/async.py b/synapse/util/async.py index 5d0fb39130..7d5acecb1c 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -248,11 +248,15 @@ class Limiter(object): # do some work. """ - def __init__(self, max_count): + def __init__(self, max_count, clock=None): """ Args: max_count(int): The maximum number of concurrent access """ + if not clock: + from twisted.internet import reactor + clock = Clock(reactor) + self._clock = clock self.max_count = max_count # key_to_defer is a map from the key to a 2 element list where @@ -277,10 +281,23 @@ class Limiter(object): with PreserveLoggingContext(): yield new_defer logger.info("Acquired limiter lock for key %r", key) + entry[0] += 1 + + # if the code holding the lock completes synchronously, then it + # will recursively run the next claimant on the list. That can + # relatively rapidly lead to stack exhaustion. 
This is essentially + # the same problem as http://twistedmatrix.com/trac/ticket/9304. + # + # In order to break the cycle, we add a cheeky sleep(0) here to + # ensure that we fall back to the reactor between each iteration. + # + # (This needs to happen while we hold the lock, and the context manager's exit + # code must be synchronous, so this is the only sensible place.) + yield self._clock.sleep(0) + else: logger.info("Acquired uncontended limiter lock for key %r", key) - - entry[0] += 1 + entry[0] += 1 @contextmanager def _ctx_manager(): diff --git a/tests/util/test_limiter.py b/tests/util/test_limiter.py index a5a767b1ff..f7b942f5c1 100644 --- a/tests/util/test_limiter.py +++ b/tests/util/test_limiter.py @@ -48,21 +48,21 @@ class LimiterTestCase(unittest.TestCase): self.assertFalse(d4.called) self.assertFalse(d5.called) - self.assertTrue(d4.called) + cm4 = yield d4 self.assertFalse(d5.called) with cm3: self.assertFalse(d5.called) - self.assertTrue(d5.called) + cm5 = yield d5 with cm2: pass - with (yield d4): + with cm4: pass - with (yield d5): + with cm5: pass d6 = limiter.queue(key) -- cgit 1.5.1 From 8462c26485fb4f19fc52accc05870c0ea4c8eb6a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 20 Jul 2018 12:43:23 +0100 Subject: Improvements to the Limiter * give them names, to improve logging * use a deque rather than a list for efficiency --- synapse/handlers/message.py | 2 +- synapse/util/async.py | 33 ++++++++++++++++++++------------- 2 files changed, 21 insertions(+), 14 deletions(-) (limited to 'synapse/util') diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index a39b852ceb..8c12c6990f 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -427,7 +427,7 @@ class EventCreationHandler(object): # We arbitrarily limit concurrent event creation for a room to 5. # This is to stop us from diverging history *too* much. - self.limiter = Limiter(max_count=5) + self.limiter = Limiter(max_count=5, name="room_event_creation_limit") self.action_generator = hs.get_action_generator() diff --git a/synapse/util/async.py b/synapse/util/async.py index 7d5acecb1c..22071ddef7 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import collections import logging from contextlib import contextmanager @@ -248,11 +249,16 @@ class Limiter(object): # do some work. 
""" - def __init__(self, max_count, clock=None): + def __init__(self, max_count=1, name=None, clock=None): """ Args: - max_count(int): The maximum number of concurrent access + max_count(int): The maximum number of concurrent accesses """ + if name is None: + self.name = id(self) + else: + self.name = name + if not clock: from twisted.internet import reactor clock = Clock(reactor) @@ -260,14 +266,14 @@ class Limiter(object): self.max_count = max_count # key_to_defer is a map from the key to a 2 element list where - # the first element is the number of things executing - # the second element is a list of deferreds for the things blocked from + # the first element is the number of things executing, and + # the second element is a deque of deferreds for the things blocked from # executing. self.key_to_defer = {} @defer.inlineCallbacks def queue(self, key): - entry = self.key_to_defer.setdefault(key, [0, []]) + entry = self.key_to_defer.setdefault(key, [0, collections.deque()]) # If the number of things executing is greater than the maximum # then add a deferred to the list of blocked items @@ -277,10 +283,10 @@ class Limiter(object): new_defer = defer.Deferred() entry[1].append(new_defer) - logger.info("Waiting to acquire limiter lock for key %r", key) - with PreserveLoggingContext(): - yield new_defer - logger.info("Acquired limiter lock for key %r", key) + logger.info("Waiting to acquire limiter lock %r for key %r", self.name, key) + yield make_deferred_yieldable(new_defer) + + logger.info("Acquired limiter lock %r for key %r", self.name, key) entry[0] += 1 # if the code holding the lock completes synchronously, then it @@ -296,7 +302,7 @@ class Limiter(object): yield self._clock.sleep(0) else: - logger.info("Acquired uncontended limiter lock for key %r", key) + logger.info("Acquired uncontended limiter lock %r for key %r", self.name, key) entry[0] += 1 @contextmanager @@ -304,15 +310,16 @@ class Limiter(object): try: yield finally: - logger.info("Releasing limiter lock for key %r", key) + logger.info("Releasing limiter lock %r for key %r", self.name, key) # We've finished executing so check if there are any things # blocked waiting to execute and start one of them entry[0] -= 1 if entry[1]: - next_def = entry[1].pop(0) + next_def = entry[1].popleft() + # we need to run the next thing in the sentinel context. with PreserveLoggingContext(): next_def.callback(None) elif entry[0] == 0: -- cgit 1.5.1 From 7c712f95bbc7f405355d5714c92d65551f64fec2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 20 Jul 2018 13:11:43 +0100 Subject: Combine Limiter and Linearizer Linearizer was effectively a Limiter with max_count=1, so rather than maintaining two sets of code, let's combine them. 
--- synapse/handlers/message.py | 4 +- synapse/util/async.py | 99 +++++-------------------------------------- tests/util/test_limiter.py | 70 ------------------------------ tests/util/test_linearizer.py | 47 ++++++++++++++++++++ 4 files changed, 59 insertions(+), 161 deletions(-) delete mode 100644 tests/util/test_limiter.py (limited to 'synapse/util') diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 8c12c6990f..abc07ea87c 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -33,7 +33,7 @@ from synapse.events.utils import serialize_event from synapse.events.validator import EventValidator from synapse.replication.http.send_event import send_event_to_master from synapse.types import RoomAlias, RoomStreamToken, UserID -from synapse.util.async import Limiter, ReadWriteLock +from synapse.util.async import Linearizer, ReadWriteLock from synapse.util.frozenutils import frozendict_json_encoder from synapse.util.logcontext import run_in_background from synapse.util.metrics import measure_func @@ -427,7 +427,7 @@ class EventCreationHandler(object): # We arbitrarily limit concurrent event creation for a room to 5. # This is to stop us from diverging history *too* much. - self.limiter = Limiter(max_count=5, name="room_event_creation_limit") + self.limiter = Linearizer(max_count=5, name="room_event_creation_limit") self.action_generator = hs.get_action_generator() diff --git a/synapse/util/async.py b/synapse/util/async.py index 22071ddef7..5a50d9700f 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -157,91 +157,8 @@ def concurrently_execute(func, args, limit): class Linearizer(object): - """Linearizes access to resources based on a key. Useful to ensure only one - thing is happening at a time on a given resource. - - Example: - - with (yield linearizer.queue("test_key")): - # do some work. - - """ - def __init__(self, name=None, clock=None): - if name is None: - self.name = id(self) - else: - self.name = name - self.key_to_defer = {} - - if not clock: - from twisted.internet import reactor - clock = Clock(reactor) - self._clock = clock - - @defer.inlineCallbacks - def queue(self, key): - # If there is already a deferred in the queue, we pull it out so that - # we can wait on it later. - # Then we replace it with a deferred that we resolve *after* the - # context manager has exited. - # We only return the context manager after the previous deferred has - # resolved. - # This all has the net effect of creating a chain of deferreds that - # wait for the previous deferred before starting their work. - current_defer = self.key_to_defer.get(key) - - new_defer = defer.Deferred() - self.key_to_defer[key] = new_defer - - if current_defer: - logger.info( - "Waiting to acquire linearizer lock %r for key %r", self.name, key - ) - try: - with PreserveLoggingContext(): - yield current_defer - except Exception: - logger.exception("Unexpected exception in Linearizer") - - logger.info("Acquired linearizer lock %r for key %r", self.name, - key) - - # if the code holding the lock completes synchronously, then it - # will recursively run the next claimant on the list. That can - # relatively rapidly lead to stack exhaustion. This is essentially - # the same problem as http://twistedmatrix.com/trac/ticket/9304. - # - # In order to break the cycle, we add a cheeky sleep(0) here to - # ensure that we fall back to the reactor between each iteration. 
- # - # (There's no particular need for it to happen before we return - # the context manager, but it needs to happen while we hold the - # lock, and the context manager's exit code must be synchronous, - # so actually this is the only sensible place. - yield self._clock.sleep(0) - - else: - logger.info("Acquired uncontended linearizer lock %r for key %r", - self.name, key) - - @contextmanager - def _ctx_manager(): - try: - yield - finally: - logger.info("Releasing linearizer lock %r for key %r", self.name, key) - with PreserveLoggingContext(): - new_defer.callback(None) - current_d = self.key_to_defer.get(key) - if current_d is new_defer: - self.key_to_defer.pop(key, None) - - defer.returnValue(_ctx_manager()) - - -class Limiter(object): """Limits concurrent access to resources based on a key. Useful to ensure - only a few thing happen at a time on a given resource. + only a few things happen at a time on a given resource. Example: @@ -249,7 +166,7 @@ class Limiter(object): # do some work. """ - def __init__(self, max_count=1, name=None, clock=None): + def __init__(self, name=None, max_count=1, clock=None): """ Args: max_count(int): The maximum number of concurrent accesses @@ -283,10 +200,12 @@ class Limiter(object): new_defer = defer.Deferred() entry[1].append(new_defer) - logger.info("Waiting to acquire limiter lock %r for key %r", self.name, key) + logger.info( + "Waiting to acquire linearizer lock %r for key %r", self.name, key, + ) yield make_deferred_yieldable(new_defer) - logger.info("Acquired limiter lock %r for key %r", self.name, key) + logger.info("Acquired linearizer lock %r for key %r", self.name, key) entry[0] += 1 # if the code holding the lock completes synchronously, then it @@ -302,7 +221,9 @@ class Limiter(object): yield self._clock.sleep(0) else: - logger.info("Acquired uncontended limiter lock %r for key %r", self.name, key) + logger.info( + "Acquired uncontended linearizer lock %r for key %r", self.name, key, + ) entry[0] += 1 @contextmanager @@ -310,7 +231,7 @@ class Limiter(object): try: yield finally: - logger.info("Releasing limiter lock %r for key %r", self.name, key) + logger.info("Releasing linearizer lock %r for key %r", self.name, key) # We've finished executing so check if there are any things # blocked waiting to execute and start one of them diff --git a/tests/util/test_limiter.py b/tests/util/test_limiter.py deleted file mode 100644 index f7b942f5c1..0000000000 --- a/tests/util/test_limiter.py +++ /dev/null @@ -1,70 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from twisted.internet import defer - -from synapse.util.async import Limiter - -from tests import unittest - - -class LimiterTestCase(unittest.TestCase): - - @defer.inlineCallbacks - def test_limiter(self): - limiter = Limiter(3) - - key = object() - - d1 = limiter.queue(key) - cm1 = yield d1 - - d2 = limiter.queue(key) - cm2 = yield d2 - - d3 = limiter.queue(key) - cm3 = yield d3 - - d4 = limiter.queue(key) - self.assertFalse(d4.called) - - d5 = limiter.queue(key) - self.assertFalse(d5.called) - - with cm1: - self.assertFalse(d4.called) - self.assertFalse(d5.called) - - cm4 = yield d4 - self.assertFalse(d5.called) - - with cm3: - self.assertFalse(d5.called) - - cm5 = yield d5 - - with cm2: - pass - - with cm4: - pass - - with cm5: - pass - - d6 = limiter.queue(key) - with (yield d6): - pass diff --git a/tests/util/test_linearizer.py b/tests/util/test_linearizer.py index c95907b32c..c9563124f9 100644 --- a/tests/util/test_linearizer.py +++ b/tests/util/test_linearizer.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -65,3 +66,49 @@ class LinearizerTestCase(unittest.TestCase): func(i) return func(1000) + + @defer.inlineCallbacks + def test_multiple_entries(self): + limiter = Linearizer(max_count=3) + + key = object() + + d1 = limiter.queue(key) + cm1 = yield d1 + + d2 = limiter.queue(key) + cm2 = yield d2 + + d3 = limiter.queue(key) + cm3 = yield d3 + + d4 = limiter.queue(key) + self.assertFalse(d4.called) + + d5 = limiter.queue(key) + self.assertFalse(d5.called) + + with cm1: + self.assertFalse(d4.called) + self.assertFalse(d5.called) + + cm4 = yield d4 + self.assertFalse(d5.called) + + with cm3: + self.assertFalse(d5.called) + + cm5 = yield d5 + + with cm2: + pass + + with cm4: + pass + + with cm5: + pass + + d6 = limiter.queue(key) + with (yield d6): + pass -- cgit 1.5.1 From 3d6df846580ce6ef8769945e6990af2f44251e40 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 20 Jul 2018 13:59:55 +0100 Subject: Test and fix support for cancellation in Linearizer --- synapse/util/async.py | 28 ++++++++++++++++++++++------ tests/util/test_linearizer.py | 31 +++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 6 deletions(-) (limited to 'synapse/util') diff --git a/synapse/util/async.py b/synapse/util/async.py index 5a50d9700f..a7094e2fb4 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -184,13 +184,13 @@ class Linearizer(object): # key_to_defer is a map from the key to a 2 element list where # the first element is the number of things executing, and - # the second element is a deque of deferreds for the things blocked from - # executing. + # the second element is an OrderedDict, where the keys are deferreds for the + # things blocked from executing. self.key_to_defer = {} @defer.inlineCallbacks def queue(self, key): - entry = self.key_to_defer.setdefault(key, [0, collections.deque()]) + entry = self.key_to_defer.setdefault(key, [0, collections.OrderedDict()]) # If the number of things executing is greater than the maximum # then add a deferred to the list of blocked items @@ -198,12 +198,28 @@ class Linearizer(object): # this item so that it can continue executing. 
if entry[0] >= self.max_count: new_defer = defer.Deferred() - entry[1].append(new_defer) + entry[1][new_defer] = 1 logger.info( "Waiting to acquire linearizer lock %r for key %r", self.name, key, ) - yield make_deferred_yieldable(new_defer) + try: + yield make_deferred_yieldable(new_defer) + except Exception as e: + if isinstance(e, CancelledError): + logger.info( + "Cancelling wait for linearizer lock %r for key %r", + self.name, key, + ) + else: + logger.warn( + "Unexpected exception waiting for linearizer lock %r for key %r", + self.name, key, + ) + + # we just have to take ourselves back out of the queue. + del entry[1][new_defer] + raise logger.info("Acquired linearizer lock %r for key %r", self.name, key) entry[0] += 1 @@ -238,7 +254,7 @@ class Linearizer(object): entry[0] -= 1 if entry[1]: - next_def = entry[1].popleft() + (next_def, _) = entry[1].popitem(last=False) # we need to run the next thing in the sentinel context. with PreserveLoggingContext(): diff --git a/tests/util/test_linearizer.py b/tests/util/test_linearizer.py index c9563124f9..4729bd5a0a 100644 --- a/tests/util/test_linearizer.py +++ b/tests/util/test_linearizer.py @@ -17,6 +17,7 @@ from six.moves import range from twisted.internet import defer, reactor +from twisted.internet.defer import CancelledError from synapse.util import Clock, logcontext from synapse.util.async import Linearizer @@ -112,3 +113,33 @@ class LinearizerTestCase(unittest.TestCase): d6 = limiter.queue(key) with (yield d6): pass + + @defer.inlineCallbacks + def test_cancellation(self): + linearizer = Linearizer() + + key = object() + + d1 = linearizer.queue(key) + cm1 = yield d1 + + d2 = linearizer.queue(key) + self.assertFalse(d2.called) + + d3 = linearizer.queue(key) + self.assertFalse(d3.called) + + d2.cancel() + + with cm1: + pass + + self.assertTrue(d2.called) + try: + yield d2 + self.fail("Expected d2 to raise CancelledError") + except CancelledError: + pass + + with (yield d3): + pass -- cgit 1.5.1