From 32e7c9e7f20b57dd081023ac42d6931a8da9b3a3 Mon Sep 17 00:00:00 2001
From: Amber Brown
Date: Thu, 20 Jun 2019 19:32:02 +1000
Subject: Run Black. (#5482)

---
 synapse/util/async_helpers.py | 41 ++++++++++++++++++++++++-----------------
 1 file changed, 24 insertions(+), 17 deletions(-)

(limited to 'synapse/util/async_helpers.py')

diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index 7253ba120f..7757b8708a 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -95,6 +95,7 @@ class ObservableDeferred(object):
         def remove(r):
             self._observers.discard(d)
             return r
+
         d.addBoth(remove)
 
         self._observers.add(d)
@@ -123,7 +124,9 @@ class ObservableDeferred(object):
 
     def __repr__(self):
         return "<ObservableDeferred object at %s, result=%r, _deferred=%r>" % (
-            id(self), self._result, self._deferred,
+            id(self),
+            self._result,
+            self._deferred,
         )
 
 
@@ -150,10 +153,12 @@ def concurrently_execute(func, args, limit):
         except StopIteration:
             pass
 
-    return logcontext.make_deferred_yieldable(defer.gatherResults([
-        run_in_background(_concurrently_execute_inner)
-        for _ in range(limit)
-    ], consumeErrors=True)).addErrback(unwrapFirstError)
+    return logcontext.make_deferred_yieldable(
+        defer.gatherResults(
+            [run_in_background(_concurrently_execute_inner) for _ in range(limit)],
+            consumeErrors=True,
+        )
+    ).addErrback(unwrapFirstError)
 
 
 def yieldable_gather_results(func, iter, *args, **kwargs):
@@ -169,10 +174,12 @@ def yieldable_gather_results(func, iter, *args, **kwargs):
         Deferred[list]: Resolved when all functions have been
         invoked, or errors if one of the function calls fails.
     """
-    return logcontext.make_deferred_yieldable(defer.gatherResults([
-        run_in_background(func, item, *args, **kwargs)
-        for item in iter
-    ], consumeErrors=True)).addErrback(unwrapFirstError)
+    return logcontext.make_deferred_yieldable(
+        defer.gatherResults(
+            [run_in_background(func, item, *args, **kwargs) for item in iter],
+            consumeErrors=True,
+        )
+    ).addErrback(unwrapFirstError)
 
 
 class Linearizer(object):
@@ -185,6 +192,7 @@ class Linearizer(object):
             # do some work.
 
     """
+
     def __init__(self, name=None, max_count=1, clock=None):
         """
         Args:
@@ -197,6 +205,7 @@ class Linearizer(object):
 
         if not clock:
             from twisted.internet import reactor
+
             clock = Clock(reactor)
         self._clock = clock
         self.max_count = max_count
@@ -221,7 +230,7 @@ class Linearizer(object):
             res = self._await_lock(key)
         else:
             logger.debug(
-                "Acquired uncontended linearizer lock %r for key %r", self.name, key,
+                "Acquired uncontended linearizer lock %r for key %r", self.name, key
             )
             entry[0] += 1
             res = defer.succeed(None)
@@ -266,9 +275,7 @@ class Linearizer(object):
         """
         entry = self.key_to_defer[key]
 
-        logger.debug(
-            "Waiting to acquire linearizer lock %r for key %r", self.name, key,
-        )
+        logger.debug("Waiting to acquire linearizer lock %r for key %r", self.name, key)
 
         new_defer = make_deferred_yieldable(defer.Deferred())
         entry[1][new_defer] = 1
@@ -293,14 +300,14 @@ class Linearizer(object):
             logger.info("defer %r got err %r", new_defer, e)
             if isinstance(e, CancelledError):
                 logger.debug(
-                    "Cancelling wait for linearizer lock %r for key %r",
-                    self.name, key,
+                    "Cancelling wait for linearizer lock %r for key %r", self.name, key
                 )
 
             else:
                 logger.warn(
                     "Unexpected exception waiting for linearizer lock %r for key %r",
-                    self.name, key,
+                    self.name,
+                    key,
                 )
 
             # we just have to take ourselves back out of the queue.
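For reference while reading these hunks: the Linearizer being reformatted is driven through its queue() method, as the class docstring above shows. A minimal usage sketch, not part of the patch (do_work is a hypothetical downstream coroutine):

.. code:: python

    from twisted.internet import defer

    from synapse.util.async_helpers import Linearizer

    linearizer = Linearizer(name="example")  # max_count=1: one holder per key

    @defer.inlineCallbacks
    def handle(room_id):
        # queue() resolves to a context manager; callers for the same key are
        # serialised, so only one block runs for a given room at a time.
        with (yield linearizer.queue(room_id)):
            yield do_work(room_id)  # hypothetical coroutine, for illustration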
@@ -438,7 +445,7 @@ def timeout_deferred(deferred, timeout, reactor, on_timeout_cancel=None): try: deferred.cancel() - except: # noqa: E722, if we throw any exception it'll break time outs + except: # noqa: E722, if we throw any exception it'll break time outs logger.exception("Canceller failed during timeout") if not new_d.called: -- cgit 1.5.1 From 463b072b1290a8cb75bc1d7b6688fa76bbfdb14f Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 4 Jul 2019 00:07:04 +1000 Subject: Move logging utilities out of the side drawer of util/ and into logging/ (#5606) --- changelog.d/5606.misc | 1 + contrib/example_log_config.yaml | 8 +- contrib/experiments/test_messaging.py | 2 +- contrib/systemd/log_config.yaml | 2 +- debian/changelog | 3 + debian/log.yaml | 2 +- docker/conf/log.config | 2 +- docs/log_contexts.rst | 38 +- synapse/app/_base.py | 2 +- synapse/app/appservice.py | 2 +- synapse/app/client_reader.py | 2 +- synapse/app/event_creator.py | 2 +- synapse/app/federation_reader.py | 2 +- synapse/app/federation_sender.py | 2 +- synapse/app/frontend_proxy.py | 2 +- synapse/app/homeserver.py | 2 +- synapse/app/media_repository.py | 2 +- synapse/app/pusher.py | 2 +- synapse/app/synchrotron.py | 2 +- synapse/app/user_dir.py | 2 +- synapse/appservice/scheduler.py | 2 +- synapse/config/logger.py | 4 +- synapse/crypto/keyring.py | 15 +- synapse/events/snapshot.py | 2 +- synapse/federation/federation_base.py | 24 +- synapse/federation/federation_client.py | 8 +- synapse/federation/federation_server.py | 4 +- synapse/federation/persistence.py | 2 +- synapse/federation/sender/__init__.py | 12 +- synapse/federation/transport/client.py | 2 +- synapse/federation/transport/server.py | 2 +- synapse/groups/attestations.py | 2 +- synapse/handlers/account_validity.py | 2 +- synapse/handlers/appservice.py | 2 +- synapse/handlers/auth.py | 6 +- synapse/handlers/e2e_keys.py | 2 +- synapse/handlers/events.py | 2 +- synapse/handlers/federation.py | 43 +- synapse/handlers/initial_sync.py | 2 +- synapse/handlers/message.py | 2 +- synapse/handlers/pagination.py | 2 +- synapse/handlers/presence.py | 4 +- synapse/handlers/sync.py | 2 +- synapse/handlers/typing.py | 2 +- synapse/http/client.py | 2 +- synapse/http/federation/matrix_federation_agent.py | 2 +- synapse/http/federation/srv_resolver.py | 2 +- synapse/http/matrixfederationclient.py | 2 +- synapse/http/request_metrics.py | 2 +- synapse/http/server.py | 2 +- synapse/http/site.py | 2 +- synapse/logging/__init__.py | 0 synapse/logging/context.py | 693 +++++++++++++++++++++ synapse/logging/formatter.py | 53 ++ synapse/logging/utils.py | 194 ++++++ synapse/metrics/background_process_metrics.py | 2 +- synapse/notifier.py | 4 +- synapse/push/mailer.py | 2 +- synapse/replication/tcp/protocol.py | 2 +- synapse/rest/client/transactions.py | 2 +- synapse/rest/media/v1/_base.py | 6 +- synapse/rest/media/v1/media_repository.py | 12 +- synapse/rest/media/v1/media_storage.py | 5 +- synapse/rest/media/v1/preview_url_resource.py | 2 +- synapse/rest/media/v1/storage_provider.py | 5 +- synapse/state/__init__.py | 2 +- synapse/storage/_base.py | 2 +- synapse/storage/events.py | 4 +- synapse/storage/events_worker.py | 6 +- synapse/storage/stream.py | 2 +- synapse/util/__init__.py | 12 +- synapse/util/async_helpers.py | 9 +- synapse/util/caches/descriptors.py | 11 +- synapse/util/caches/response_cache.py | 4 +- synapse/util/distributor.py | 2 +- synapse/util/file_consumer.py | 2 +- synapse/util/logcontext.py | 693 --------------------- synapse/util/logformatter.py | 53 -- 
synapse/util/logutils.py | 194 ------ synapse/util/metrics.py | 2 +- synapse/util/ratelimitutils.py | 2 +- synapse/util/retryutils.py | 4 +- tests/appservice/test_scheduler.py | 2 +- tests/crypto/test_keyring.py | 13 +- .../federation/test_matrix_federation_agent.py | 2 +- tests/http/federation/test_srv_resolver.py | 2 +- tests/http/test_fedclient.py | 2 +- tests/patch_inline_callbacks.py | 2 +- tests/push/test_http.py | 2 +- tests/rest/client/test_transactions.py | 2 +- tests/rest/media/v1/test_media_storage.py | 2 +- tests/test_federation.py | 2 +- tests/test_server.py | 2 +- tests/test_utils/logging_setup.py | 2 +- tests/unittest.py | 2 +- tests/util/caches/test_descriptors.py | 49 +- tests/util/test_async_utils.py | 7 +- tests/util/test_linearizer.py | 9 +- tests/util/test_logcontext.py | 24 +- tests/util/test_logformatter.py | 2 +- tests/utils.py | 2 +- 101 files changed, 1189 insertions(+), 1173 deletions(-) create mode 100644 changelog.d/5606.misc create mode 100644 synapse/logging/__init__.py create mode 100644 synapse/logging/context.py create mode 100644 synapse/logging/formatter.py create mode 100644 synapse/logging/utils.py delete mode 100644 synapse/util/logcontext.py delete mode 100644 synapse/util/logformatter.py delete mode 100644 synapse/util/logutils.py (limited to 'synapse/util/async_helpers.py') diff --git a/changelog.d/5606.misc b/changelog.d/5606.misc new file mode 100644 index 0000000000..bb3c028167 --- /dev/null +++ b/changelog.d/5606.misc @@ -0,0 +1 @@ +Move logging code out of `synapse.util` and into `synapse.logging`. diff --git a/contrib/example_log_config.yaml b/contrib/example_log_config.yaml index 06592963da..3a76a7a33f 100644 --- a/contrib/example_log_config.yaml +++ b/contrib/example_log_config.yaml @@ -1,7 +1,7 @@ -# Example log_config file for synapse. To enable, point `log_config` to it in +# Example log_config file for synapse. To enable, point `log_config` to it in # `homeserver.yaml`, and restart synapse. # -# This configuration will produce similar results to the defaults within +# This configuration will produce similar results to the defaults within # synapse, but can be edited to give more flexibility. 
version: 1 @@ -12,7 +12,7 @@ formatters: filters: context: - (): synapse.util.logcontext.LoggingContextFilter + (): synapse.logging.context.LoggingContextFilter request: "" handlers: @@ -35,7 +35,7 @@ handlers: root: level: INFO handlers: [console] # to use file handler instead, switch to [file] - + loggers: synapse: level: INFO diff --git a/contrib/experiments/test_messaging.py b/contrib/experiments/test_messaging.py index c7e55d8aa7..5ef140ae48 100644 --- a/contrib/experiments/test_messaging.py +++ b/contrib/experiments/test_messaging.py @@ -36,7 +36,7 @@ from synapse.util import origin_from_ucid from synapse.app.homeserver import SynapseHomeServer -# from synapse.util.logutils import log_function +# from synapse.logging.utils import log_function from twisted.internet import reactor, defer from twisted.python import log diff --git a/contrib/systemd/log_config.yaml b/contrib/systemd/log_config.yaml index d85bdd1208..22f67a50ce 100644 --- a/contrib/systemd/log_config.yaml +++ b/contrib/systemd/log_config.yaml @@ -8,7 +8,7 @@ formatters: filters: context: - (): synapse.util.logcontext.LoggingContextFilter + (): synapse.logging.context.LoggingContextFilter request: "" handlers: diff --git a/debian/changelog b/debian/changelog index 91653e7243..bf5a59b335 100644 --- a/debian/changelog +++ b/debian/changelog @@ -3,6 +3,9 @@ matrix-synapse-py3 (1.0.0+nmu1) UNRELEASED; urgency=medium [ Silke Hofstra ] * Include systemd-python to allow logging to the systemd journal. + [ Amber Brown ] + * Update logging config defaults to match API changes in Synapse. + -- Silke Hofstra Wed, 29 May 2019 09:45:29 +0200 matrix-synapse-py3 (1.0.0) stable; urgency=medium diff --git a/debian/log.yaml b/debian/log.yaml index 206b65f1ac..95b655dd35 100644 --- a/debian/log.yaml +++ b/debian/log.yaml @@ -7,7 +7,7 @@ formatters: filters: context: - (): synapse.util.logcontext.LoggingContextFilter + (): synapse.logging.context.LoggingContextFilter request: "" handlers: diff --git a/docker/conf/log.config b/docker/conf/log.config index 895e45d20b..ea5ccfd68b 100644 --- a/docker/conf/log.config +++ b/docker/conf/log.config @@ -6,7 +6,7 @@ formatters: filters: context: - (): synapse.util.logcontext.LoggingContextFilter + (): synapse.logging.context.LoggingContextFilter request: "" handlers: diff --git a/docs/log_contexts.rst b/docs/log_contexts.rst index 27cde11cf7..f5cd5de8ab 100644 --- a/docs/log_contexts.rst +++ b/docs/log_contexts.rst @@ -1,4 +1,4 @@ -Log contexts +Log Contexts ============ .. contents:: @@ -12,7 +12,7 @@ record. Logcontexts are also used for CPU and database accounting, so that we can track which requests were responsible for high CPU use or database activity. -The ``synapse.util.logcontext`` module provides a facilities for managing the +The ``synapse.logging.context`` module provides a facilities for managing the current log context (as well as providing the ``LoggingContextFilter`` class). Deferreds make the whole thing complicated, so this document describes how it @@ -27,19 +27,19 @@ found them: .. 
code:: python - from synapse.util import logcontext # omitted from future snippets + from synapse.logging import context # omitted from future snippets def handle_request(request_id): - request_context = logcontext.LoggingContext() + request_context = context.LoggingContext() - calling_context = logcontext.LoggingContext.current_context() - logcontext.LoggingContext.set_current_context(request_context) + calling_context = context.LoggingContext.current_context() + context.LoggingContext.set_current_context(request_context) try: request_context.request = request_id do_request_handling() logger.debug("finished") finally: - logcontext.LoggingContext.set_current_context(calling_context) + context.LoggingContext.set_current_context(calling_context) def do_request_handling(): logger.debug("phew") # this will be logged against request_id @@ -51,7 +51,7 @@ written much more succinctly as: .. code:: python def handle_request(request_id): - with logcontext.LoggingContext() as request_context: + with context.LoggingContext() as request_context: request_context.request = request_id do_request_handling() logger.debug("finished") @@ -74,7 +74,7 @@ blocking operation, and returns a deferred: @defer.inlineCallbacks def handle_request(request_id): - with logcontext.LoggingContext() as request_context: + with context.LoggingContext() as request_context: request_context.request = request_id yield do_request_handling() logger.debug("finished") @@ -179,7 +179,7 @@ though, we need to make up a new Deferred, or we get a Deferred back from external code. We need to make it follow our rules. The easy way to do it is with a combination of ``defer.inlineCallbacks``, and -``logcontext.PreserveLoggingContext``. Suppose we want to implement ``sleep``, +``context.PreserveLoggingContext``. Suppose we want to implement ``sleep``, which returns a deferred which will run its callbacks after a given number of seconds. That might look like: @@ -204,13 +204,13 @@ That doesn't follow the rules, but we can fix it by wrapping it with This technique works equally for external functions which return deferreds, or deferreds we have made ourselves. -You can also use ``logcontext.make_deferred_yieldable``, which just does the +You can also use ``context.make_deferred_yieldable``, which just does the boilerplate for you, so the above could be written: .. code:: python def sleep(seconds): - return logcontext.make_deferred_yieldable(get_sleep_deferred(seconds)) + return context.make_deferred_yieldable(get_sleep_deferred(seconds)) Fire-and-forget @@ -279,7 +279,7 @@ Obviously that option means that the operations done in that might be fixed by setting a different logcontext via a ``with LoggingContext(...)`` in ``background_operation``). -The second option is to use ``logcontext.run_in_background``, which wraps a +The second option is to use ``context.run_in_background``, which wraps a function so that it doesn't reset the logcontext even when it returns an incomplete deferred, and adds a callback to the returned deferred to reset the logcontext. 
In other words, it turns a function that follows the Synapse rules @@ -293,7 +293,7 @@ It can be used like this: def do_request_handling(): yield foreground_operation() - logcontext.run_in_background(background_operation) + context.run_in_background(background_operation) # this will now be logged against the request context logger.debug("Request handling complete") @@ -332,7 +332,7 @@ gathered: result = yield defer.gatherResults([d1, d2]) In this case particularly, though, option two, of using -``logcontext.preserve_fn`` almost certainly makes more sense, so that +``context.preserve_fn`` almost certainly makes more sense, so that ``operation1`` and ``operation2`` are both logged against the original logcontext. This looks like: @@ -340,8 +340,8 @@ logcontext. This looks like: @defer.inlineCallbacks def do_request_handling(): - d1 = logcontext.preserve_fn(operation1)() - d2 = logcontext.preserve_fn(operation2)() + d1 = context.preserve_fn(operation1)() + d2 = context.preserve_fn(operation2)() with PreserveLoggingContext(): result = yield defer.gatherResults([d1, d2]) @@ -381,7 +381,7 @@ off the background process, and then leave the ``with`` block to wait for it: .. code:: python def handle_request(request_id): - with logcontext.LoggingContext() as request_context: + with context.LoggingContext() as request_context: request_context.request = request_id d = do_request_handling() @@ -414,7 +414,7 @@ runs its callbacks in the original logcontext, all is happy. The business of a Deferred which runs its callbacks in the original logcontext isn't hard to achieve — we have it today, in the shape of -``logcontext._PreservingContextDeferred``: +``context._PreservingContextDeferred``: .. code:: python diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 9b5c30f506..1ebb7ae539 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -27,7 +27,7 @@ from twisted.protocols.tls import TLSMemoryBIOFactory import synapse from synapse.app import check_bind_error from synapse.crypto import context_factory -from synapse.util import PreserveLoggingContext +from synapse.logging.context import PreserveLoggingContext from synapse.util.async_helpers import Linearizer from synapse.util.rlimit import change_resource_limit from synapse.util.versionstring import get_version_string diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index 9120bdb143..be44249ed6 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -26,6 +26,7 @@ from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext, run_in_background from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore @@ -36,7 +37,6 @@ from synapse.replication.tcp.client import ReplicationClientHandler from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 90bc79cdda..ff11beca82 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -27,6 +27,7 
@@ from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.http.server import JsonResource from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage._base import BaseSlavedStore @@ -64,7 +65,6 @@ from synapse.rest.client.versions import VersionsRestServlet from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index ff522e4499..cacad25eac 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -27,6 +27,7 @@ from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.http.server import JsonResource from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage._base import BaseSlavedStore @@ -59,7 +60,6 @@ from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.storage.user_directory import UserDirectoryStore from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 9421420930..11e80dbae0 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -28,6 +28,7 @@ from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.federation.transport.server import TransportLayerServer from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage._base import BaseSlavedStore @@ -48,7 +49,6 @@ from synapse.rest.key.v2 import KeyApiV2Resource from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index 969be58d0b..97da7bdcbf 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -27,6 +27,7 @@ from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.federation import send_queue from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext, run_in_background from synapse.metrics import RegistryProxy from synapse.metrics.background_process_metrics import run_as_background_process from synapse.metrics.resource import METRICS_PREFIX, MetricsResource @@ -44,7 +45,6 @@ from synapse.storage.engines import 
create_engine from synapse.types import ReadReceipt from synapse.util.async_helpers import Linearizer from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 2fd7d57ebf..417a10bbd2 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -29,6 +29,7 @@ from synapse.config.logger import setup_logging from synapse.http.server import JsonResource from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage._base import BaseSlavedStore @@ -41,7 +42,6 @@ from synapse.rest.client.v2_alpha._base import client_patterns from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 49da105cf6..639b1429c0 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -54,6 +54,7 @@ from synapse.federation.transport.server import TransportLayerServer from synapse.http.additional_resource import AdditionalResource from synapse.http.server import RootRedirect from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext from synapse.metrics import RegistryProxy from synapse.metrics.background_process_metrics import run_as_background_process from synapse.metrics.resource import METRICS_PREFIX, MetricsResource @@ -72,7 +73,6 @@ from synapse.storage.engines import IncorrectDatabaseSetup, create_engine from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database from synapse.util.caches import CACHE_SIZE_FACTOR from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.module_loader import load_module from synapse.util.rlimit import change_resource_limit diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index cf0e2036c3..f23b9b6eda 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -27,6 +27,7 @@ from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage._base import BaseSlavedStore @@ -40,7 +41,6 @@ from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.storage.media_repository import MediaRepositoryStore from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string diff --git a/synapse/app/pusher.py 
b/synapse/app/pusher.py index df29ea5ecb..4f929edf86 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -26,6 +26,7 @@ from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext, run_in_background from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage._base import __func__ @@ -38,7 +39,6 @@ from synapse.server import HomeServer from synapse.storage import DataStore from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 858949910d..de4797fddc 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -31,6 +31,7 @@ from synapse.config.logger import setup_logging from synapse.handlers.presence import PresenceHandler, get_interested_parties from synapse.http.server import JsonResource from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext, run_in_background from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage._base import BaseSlavedStore, __func__ @@ -57,7 +58,6 @@ from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.storage.presence import UserPresenceState from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.stringutils import random_string from synapse.util.versionstring import get_version_string diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index 2d9d2e1bbc..1177ddd72e 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -28,6 +28,7 @@ from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.http.server import JsonResource from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext, run_in_background from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage._base import BaseSlavedStore @@ -46,7 +47,6 @@ from synapse.storage.engines import create_engine from synapse.storage.user_directory import UserDirectoryStore from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index b54bf5411f..e5b36494f5 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -53,8 +53,8 @@ import logging from twisted.internet import defer from synapse.appservice import ApplicationServiceState +from synapse.logging.context import run_in_background from synapse.metrics.background_process_metrics import run_as_background_process 
-from synapse.util.logcontext import run_in_background logger = logging.getLogger(__name__) diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 931aec41c0..0f5554211c 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -24,7 +24,7 @@ from twisted.logger import STDLibLogObserver, globalLogBeginner import synapse from synapse.app import _base as appbase -from synapse.util.logcontext import LoggingContextFilter +from synapse.logging.context import LoggingContextFilter from synapse.util.versionstring import get_version_string from ._base import Config @@ -40,7 +40,7 @@ formatters: filters: context: - (): synapse.util.logcontext.LoggingContextFilter + (): synapse.logging.context.LoggingContextFilter request: "" handlers: diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 10c2eb7f0f..341c863152 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -44,15 +44,16 @@ from synapse.api.errors import ( RequestSendFailed, SynapseError, ) -from synapse.storage.keys import FetchKeyResult -from synapse.util import logcontext, unwrapFirstError -from synapse.util.async_helpers import yieldable_gather_results -from synapse.util.logcontext import ( +from synapse.logging.context import ( LoggingContext, PreserveLoggingContext, + make_deferred_yieldable, preserve_fn, run_in_background, ) +from synapse.storage.keys import FetchKeyResult +from synapse.util import unwrapFirstError +from synapse.util.async_helpers import yieldable_gather_results from synapse.util.metrics import Measure from synapse.util.retryutils import NotRetryingDestination @@ -140,7 +141,7 @@ class Keyring(object): """ req = VerifyJsonRequest(server_name, json_object, validity_time, request_name) requests = (req,) - return logcontext.make_deferred_yieldable(self._verify_objects(requests)[0]) + return make_deferred_yieldable(self._verify_objects(requests)[0]) def verify_json_objects_for_server(self, server_and_json): """Bulk verifies signatures of json objects, bulk fetching keys as @@ -557,7 +558,7 @@ class BaseV2KeyFetcher(object): signed_key_json_bytes = encode_canonical_json(signed_key_json) - yield logcontext.make_deferred_yieldable( + yield make_deferred_yieldable( defer.gatherResults( [ run_in_background( @@ -612,7 +613,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): defer.returnValue({}) - results = yield logcontext.make_deferred_yieldable( + results = yield make_deferred_yieldable( defer.gatherResults( [run_in_background(get_key, server) for server in self.key_servers], consumeErrors=True, diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index a96cdada3d..a9545e6c1b 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -19,7 +19,7 @@ from frozendict import frozendict from twisted.internet import defer -from synapse.util.logcontext import make_deferred_yieldable, run_in_background +from synapse.logging.context import make_deferred_yieldable, run_in_background class EventContext(object): diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 1e925b19e7..f7bb806ae7 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -27,8 +27,14 @@ from synapse.crypto.event_signing import check_event_content_hash from synapse.events import event_type_from_format_version from synapse.events.utils import prune_event from synapse.http.servlet import assert_params_in_dict +from synapse.logging.context import ( + LoggingContext, + 
PreserveLoggingContext, + make_deferred_yieldable, + preserve_fn, +) from synapse.types import get_domain_from_id -from synapse.util import logcontext, unwrapFirstError +from synapse.util import unwrapFirstError logger = logging.getLogger(__name__) @@ -73,7 +79,7 @@ class FederationBase(object): @defer.inlineCallbacks def handle_check_result(pdu, deferred): try: - res = yield logcontext.make_deferred_yieldable(deferred) + res = yield make_deferred_yieldable(deferred) except SynapseError: res = None @@ -102,10 +108,10 @@ class FederationBase(object): defer.returnValue(res) - handle = logcontext.preserve_fn(handle_check_result) + handle = preserve_fn(handle_check_result) deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)] - valid_pdus = yield logcontext.make_deferred_yieldable( + valid_pdus = yield make_deferred_yieldable( defer.gatherResults(deferreds2, consumeErrors=True) ).addErrback(unwrapFirstError) @@ -115,7 +121,7 @@ class FederationBase(object): defer.returnValue([p for p in valid_pdus if p]) def _check_sigs_and_hash(self, room_version, pdu): - return logcontext.make_deferred_yieldable( + return make_deferred_yieldable( self._check_sigs_and_hashes(room_version, [pdu])[0] ) @@ -133,14 +139,14 @@ class FederationBase(object): * returns a redacted version of the event (if the signature matched but the hash did not) * throws a SynapseError if the signature check failed. - The deferreds run their callbacks in the sentinel logcontext. + The deferreds run their callbacks in the sentinel """ deferreds = _check_sigs_on_pdus(self.keyring, room_version, pdus) - ctx = logcontext.LoggingContext.current_context() + ctx = LoggingContext.current_context() def callback(_, pdu): - with logcontext.PreserveLoggingContext(ctx): + with PreserveLoggingContext(ctx): if not check_event_content_hash(pdu): # let's try to distinguish between failures because the event was # redacted (which are somewhat expected) vs actual ball-tampering @@ -178,7 +184,7 @@ class FederationBase(object): def errback(failure, pdu): failure.trap(SynapseError) - with logcontext.PreserveLoggingContext(ctx): + with PreserveLoggingContext(ctx): logger.warn( "Signature check failed for %s: %s", pdu.event_id, diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 3883eb525e..3cb4b94420 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -39,10 +39,10 @@ from synapse.api.room_versions import ( ) from synapse.events import builder, room_version_to_event_format from synapse.federation.federation_base import FederationBase, event_from_pdu_json -from synapse.util import logcontext, unwrapFirstError +from synapse.logging.context import make_deferred_yieldable, run_in_background +from synapse.logging.utils import log_function +from synapse.util import unwrapFirstError from synapse.util.caches.expiringcache import ExpiringCache -from synapse.util.logcontext import make_deferred_yieldable, run_in_background -from synapse.util.logutils import log_function from synapse.util.retryutils import NotRetryingDestination logger = logging.getLogger(__name__) @@ -207,7 +207,7 @@ class FederationClient(FederationBase): ] # FIXME: We should handle signature failures more gracefully. 
- pdus[:] = yield logcontext.make_deferred_yieldable( + pdus[:] = yield make_deferred_yieldable( defer.gatherResults( self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True ).addErrback(unwrapFirstError) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 2e0cebb638..8c0a18b120 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -42,6 +42,8 @@ from synapse.federation.federation_base import FederationBase, event_from_pdu_js from synapse.federation.persistence import TransactionActions from synapse.federation.units import Edu, Transaction from synapse.http.endpoint import parse_server_name +from synapse.logging.context import nested_logging_context +from synapse.logging.utils import log_function from synapse.replication.http.federation import ( ReplicationFederationSendEduRestServlet, ReplicationGetQueryRestServlet, @@ -50,8 +52,6 @@ from synapse.types import get_domain_from_id from synapse.util import glob_to_regex from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.caches.response_cache import ResponseCache -from synapse.util.logcontext import nested_logging_context -from synapse.util.logutils import log_function # when processing incoming transactions, we try to handle multiple rooms in # parallel, up to this limit. diff --git a/synapse/federation/persistence.py b/synapse/federation/persistence.py index 7535f79203..d086e04243 100644 --- a/synapse/federation/persistence.py +++ b/synapse/federation/persistence.py @@ -23,7 +23,7 @@ import logging from twisted.internet import defer -from synapse.util.logutils import log_function +from synapse.logging.utils import log_function logger = logging.getLogger(__name__) diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 766c5a37cd..d46f4aaeb1 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -26,6 +26,11 @@ from synapse.federation.sender.per_destination_queue import PerDestinationQueue from synapse.federation.sender.transaction_manager import TransactionManager from synapse.federation.units import Edu from synapse.handlers.presence import get_interested_remotes +from synapse.logging.context import ( + make_deferred_yieldable, + preserve_fn, + run_in_background, +) from synapse.metrics import ( LaterGauge, event_processing_loop_counter, @@ -33,7 +38,6 @@ from synapse.metrics import ( events_processed_counter, ) from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.util import logcontext from synapse.util.metrics import measure_func logger = logging.getLogger(__name__) @@ -210,10 +214,10 @@ class FederationSender(object): for event in events: events_by_room.setdefault(event.room_id, []).append(event) - yield logcontext.make_deferred_yieldable( + yield make_deferred_yieldable( defer.gatherResults( [ - logcontext.run_in_background(handle_room_events, evs) + run_in_background(handle_room_events, evs) for evs in itervalues(events_by_room) ], consumeErrors=True, @@ -360,7 +364,7 @@ class FederationSender(object): for queue in queues: queue.flush_read_receipts_for_room(room_id) - @logcontext.preserve_fn # the caller should not yield on this + @preserve_fn # the caller should not yield on this @defer.inlineCallbacks def send_presence(self, states): """Send the new presence states to the appropriate destinations. 
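The change running through these hunks is mechanical: the fan-out-and-gather pattern keeps its shape while the import path moves from synapse.util.logcontext to synapse.logging.context. A minimal sketch of the pattern under the new path (fetch_one and items are illustrative placeholders):

.. code:: python

    from twisted.internet import defer

    from synapse.logging.context import make_deferred_yieldable, run_in_background

    def fetch_all(items):
        # run_in_background starts each call without losing the caller's
        # logcontext; make_deferred_yieldable then makes the gathered deferred
        # safe to yield on, restoring the logcontext in its callbacks.
        return make_deferred_yieldable(
            defer.gatherResults(
                [run_in_background(fetch_one, item) for item in items],
                consumeErrors=True,
            )
        )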
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index aecd142309..1aae9ec9e7 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -22,7 +22,7 @@ from twisted.internet import defer from synapse.api.constants import Membership from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX -from synapse.util.logutils import log_function +from synapse.logging.utils import log_function logger = logging.getLogger(__name__) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 955f0f4308..2efdcff7ef 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -36,8 +36,8 @@ from synapse.http.servlet import ( parse_json_object_from_request, parse_string_from_args, ) +from synapse.logging.context import run_in_background from synapse.types import ThirdPartyInstanceID, get_domain_from_id -from synapse.util.logcontext import run_in_background from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.versionstring import get_version_string diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index e73757570c..f497711133 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -43,9 +43,9 @@ from signedjson.sign import sign_json from twisted.internet import defer from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError +from synapse.logging.context import run_in_background from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import get_domain_from_id -from synapse.util.logcontext import run_in_background logger = logging.getLogger(__name__) diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index edb48054a0..1f1708ba7d 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -22,10 +22,10 @@ from email.mime.text import MIMEText from twisted.internet import defer from synapse.api.errors import StoreError +from synapse.logging.context import make_deferred_yieldable from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import UserID from synapse.util import stringutils -from synapse.util.logcontext import make_deferred_yieldable try: from synapse.push.mailer import load_jinja2_templates diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 5cc89d43f6..8f089f0e33 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -23,13 +23,13 @@ from twisted.internet import defer import synapse from synapse.api.constants import EventTypes +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.metrics import ( event_processing_loop_counter, event_processing_loop_room_count, ) from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util import log_failure -from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.util.metrics import Measure logger = logging.getLogger(__name__) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index c8c1ed3246..ef5585aa99 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -36,9 +36,9 @@ from synapse.api.errors import ( SynapseError, ) from synapse.api.ratelimiting import Ratelimiter +from synapse.logging.context import defer_to_thread from 
synapse.module_api import ModuleApi from synapse.types import UserID -from synapse.util import logcontext from synapse.util.caches.expiringcache import ExpiringCache from ._base import BaseHandler @@ -987,7 +987,7 @@ class AuthHandler(BaseHandler): bcrypt.gensalt(self.bcrypt_rounds), ).decode("ascii") - return logcontext.defer_to_thread(self.hs.get_reactor(), _do_hash) + return defer_to_thread(self.hs.get_reactor(), _do_hash) def validate_hash(self, password, stored_hash): """Validates that self.hash(password) == stored_hash. @@ -1013,7 +1013,7 @@ class AuthHandler(BaseHandler): if not isinstance(stored_hash, bytes): stored_hash = stored_hash.encode("ascii") - return logcontext.defer_to_thread(self.hs.get_reactor(), _do_validate_hash) + return defer_to_thread(self.hs.get_reactor(), _do_validate_hash) else: return defer.succeed(False) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 807900fe52..55b4ab3a1a 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -23,8 +23,8 @@ from canonicaljson import encode_canonical_json, json from twisted.internet import defer from synapse.api.errors import CodeMessageException, FederationDeniedError, SynapseError +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.types import UserID, get_domain_from_id -from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.util.retryutils import NotRetryingDestination logger = logging.getLogger(__name__) diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 5836d3c639..6a38328af3 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -21,8 +21,8 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError, SynapseError from synapse.events import EventBase +from synapse.logging.utils import log_function from synapse.types import UserID -from synapse.util.logutils import log_function from synapse.visibility import filter_events_for_client from ._base import BaseHandler diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 02d397c498..57be968c67 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -45,6 +45,13 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions from synapse.crypto.event_signing import compute_event_signature from synapse.event_auth import auth_types_for_event from synapse.events.validator import EventValidator +from synapse.logging.context import ( + make_deferred_yieldable, + nested_logging_context, + preserve_fn, + run_in_background, +) +from synapse.logging.utils import log_function from synapse.replication.http.federation import ( ReplicationCleanRoomRestServlet, ReplicationFederationSendEventsRestServlet, @@ -52,10 +59,9 @@ from synapse.replication.http.federation import ( from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet from synapse.state import StateResolutionStore, resolve_events_with_store from synapse.types import UserID, get_domain_from_id -from synapse.util import logcontext, unwrapFirstError +from synapse.util import unwrapFirstError from synapse.util.async_helpers import Linearizer from synapse.util.distributor import user_joined_room -from synapse.util.logutils import log_function from synapse.util.retryutils import NotRetryingDestination from synapse.visibility import filter_events_for_server @@ -338,7 +344,7 @@ class 
FederationHandler(BaseHandler): room_version = yield self.store.get_room_version(room_id) - with logcontext.nested_logging_context(p): + with nested_logging_context(p): # note that if any of the missing prevs share missing state or # auth events, the requests to fetch those events are deduped # by the get_pdu_cache in federation_client. @@ -532,7 +538,7 @@ class FederationHandler(BaseHandler): event_id, ev.event_id, ) - with logcontext.nested_logging_context(ev.event_id): + with nested_logging_context(ev.event_id): try: yield self.on_receive_pdu(origin, ev, sent_to_us_directly=False) except FederationError as e: @@ -725,10 +731,10 @@ class FederationHandler(BaseHandler): missing_auth - failed_to_fetch, ) - results = yield logcontext.make_deferred_yieldable( + results = yield make_deferred_yieldable( defer.gatherResults( [ - logcontext.run_in_background( + run_in_background( self.federation_client.get_pdu, [dest], event_id, @@ -994,10 +1000,8 @@ class FederationHandler(BaseHandler): event_ids = list(extremities.keys()) logger.debug("calling resolve_state_groups in _maybe_backfill") - resolve = logcontext.preserve_fn( - self.state_handler.resolve_state_groups_for_events - ) - states = yield logcontext.make_deferred_yieldable( + resolve = preserve_fn(self.state_handler.resolve_state_groups_for_events) + states = yield make_deferred_yieldable( defer.gatherResults( [resolve(room_id, [e]) for e in event_ids], consumeErrors=True ) @@ -1171,7 +1175,7 @@ class FederationHandler(BaseHandler): # lots of requests for missing prev_events which we do actually # have. Hence we fire off the deferred, but don't wait for it. - logcontext.run_in_background(self._handle_queued_pdus, room_queue) + run_in_background(self._handle_queued_pdus, room_queue) defer.returnValue(True) @@ -1191,7 +1195,7 @@ class FederationHandler(BaseHandler): p.event_id, p.room_id, ) - with logcontext.nested_logging_context(p.event_id): + with nested_logging_context(p.event_id): yield self.on_receive_pdu(origin, p, sent_to_us_directly=True) except Exception as e: logger.warn( @@ -1610,7 +1614,7 @@ class FederationHandler(BaseHandler): success = True finally: if not success: - logcontext.run_in_background( + run_in_background( self.store.remove_push_actions_from_staging, event.event_id ) @@ -1629,7 +1633,7 @@ class FederationHandler(BaseHandler): @defer.inlineCallbacks def prep(ev_info): event = ev_info["event"] - with logcontext.nested_logging_context(suffix=event.event_id): + with nested_logging_context(suffix=event.event_id): res = yield self._prep_event( origin, event, @@ -1639,12 +1643,9 @@ class FederationHandler(BaseHandler): ) defer.returnValue(res) - contexts = yield logcontext.make_deferred_yieldable( + contexts = yield make_deferred_yieldable( defer.gatherResults( - [ - logcontext.run_in_background(prep, ev_info) - for ev_info in event_infos - ], + [run_in_background(prep, ev_info) for ev_info in event_infos], consumeErrors=True, ) ) @@ -2106,10 +2107,10 @@ class FederationHandler(BaseHandler): room_version = yield self.store.get_room_version(event.room_id) - different_events = yield logcontext.make_deferred_yieldable( + different_events = yield make_deferred_yieldable( defer.gatherResults( [ - logcontext.run_in_background( + run_in_background( self.store.get_event, d, allow_none=True, allow_rejected=False ) for d in different_auth diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index a1fe9d116f..54c966c8a6 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py 
@@ -21,12 +21,12 @@ from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError, Codes, SynapseError from synapse.events.validator import EventValidator from synapse.handlers.presence import format_user_presence_state +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.streams.config import PaginationConfig from synapse.types import StreamToken, UserID from synapse.util import unwrapFirstError from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.snapshot_cache import SnapshotCache -from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.visibility import filter_events_for_client from ._base import BaseHandler diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 683da6bf32..eaeda7a5cb 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -34,13 +34,13 @@ from synapse.api.errors import ( from synapse.api.room_versions import RoomVersions from synapse.api.urls import ConsentURIBuilder from synapse.events.validator import EventValidator +from synapse.logging.context import run_in_background from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.send_event import ReplicationSendEventRestServlet from synapse.storage.state import StateFilter from synapse.types import RoomAlias, UserID, create_requester from synapse.util.async_helpers import Linearizer from synapse.util.frozenutils import frozendict_json_encoder -from synapse.util.logcontext import run_in_background from synapse.util.metrics import measure_func from synapse.visibility import filter_events_for_client diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 76ee97ddd3..20bcfed334 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -20,10 +20,10 @@ from twisted.python.failure import Failure from synapse.api.constants import EventTypes, Membership from synapse.api.errors import SynapseError +from synapse.logging.context import run_in_background from synapse.storage.state import StateFilter from synapse.types import RoomStreamToken from synapse.util.async_helpers import ReadWriteLock -from synapse.util.logcontext import run_in_background from synapse.util.stringutils import random_string from synapse.visibility import filter_events_for_client diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index c80dc2eba0..6f3537e435 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -34,14 +34,14 @@ from twisted.internet import defer import synapse.metrics from synapse.api.constants import EventTypes, Membership, PresenceState from synapse.api.errors import SynapseError +from synapse.logging.context import run_in_background +from synapse.logging.utils import log_function from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.presence import UserPresenceState from synapse.types import UserID, get_domain_from_id from synapse.util.async_helpers import Linearizer from synapse.util.caches.descriptors import cachedInlineCallbacks -from synapse.util.logcontext import run_in_background -from synapse.util.logutils import log_function from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index a3f550554f..cd1ac0a27a 
100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -25,6 +25,7 @@ from prometheus_client import Counter from twisted.internet import defer from synapse.api.constants import EventTypes, Membership +from synapse.logging.context import LoggingContext from synapse.push.clientformat import format_push_rules_for_user from synapse.storage.roommember import MemberSummary from synapse.storage.state import StateFilter @@ -33,7 +34,6 @@ from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.caches.lrucache import LruCache from synapse.util.caches.response_cache import ResponseCache -from synapse.util.logcontext import LoggingContext from synapse.util.metrics import Measure, measure_func from synapse.visibility import filter_events_for_client diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index f8062c8671..c3e0c8fc7e 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -19,9 +19,9 @@ from collections import namedtuple from twisted.internet import defer from synapse.api.errors import AuthError, SynapseError +from synapse.logging.context import run_in_background from synapse.types import UserID, get_domain_from_id from synapse.util.caches.stream_change_cache import StreamChangeCache -from synapse.util.logcontext import run_in_background from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer diff --git a/synapse/http/client.py b/synapse/http/client.py index 9bc7035c8d..45d5010952 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -45,9 +45,9 @@ from synapse.http import ( cancelled_to_request_timed_out_error, redact_uri, ) +from synapse.logging.context import make_deferred_yieldable from synapse.util.async_helpers import timeout_deferred from synapse.util.caches import CACHE_SIZE_FACTOR -from synapse.util.logcontext import make_deferred_yieldable logger = logging.getLogger(__name__) diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 414cde0777..054c321a20 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -30,9 +30,9 @@ from twisted.web.http_headers import Headers from twisted.web.iweb import IAgent from synapse.http.federation.srv_resolver import SrvResolver, pick_server_from_list +from synapse.logging.context import make_deferred_yieldable from synapse.util import Clock from synapse.util.caches.ttlcache import TTLCache -from synapse.util.logcontext import make_deferred_yieldable from synapse.util.metrics import Measure # period to cache .well-known results for by default diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py index 1f22f78a75..ecc88f9b96 100644 --- a/synapse/http/federation/srv_resolver.py +++ b/synapse/http/federation/srv_resolver.py @@ -25,7 +25,7 @@ from twisted.internet.error import ConnectError from twisted.names import client, dns from twisted.names.error import DNSNameError, DomainError -from synapse.util.logcontext import make_deferred_yieldable +from synapse.logging.context import make_deferred_yieldable logger = logging.getLogger(__name__) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 5ef8bb60a3..dee3710f68 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -48,8 +48,8 @@ from synapse.api.errors 
import ( from synapse.http import QuieterFileBodyProducer from synapse.http.client import BlacklistingAgentWrapper, IPBlacklistingResolver from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent +from synapse.logging.context import make_deferred_yieldable from synapse.util.async_helpers import timeout_deferred -from synapse.util.logcontext import make_deferred_yieldable from synapse.util.metrics import Measure logger = logging.getLogger(__name__) diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 62045a918b..46af27c8f6 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -19,8 +19,8 @@ import threading from prometheus_client.core import Counter, Histogram +from synapse.logging.context import LoggingContext from synapse.metrics import LaterGauge -from synapse.util.logcontext import LoggingContext logger = logging.getLogger(__name__) diff --git a/synapse/http/server.py b/synapse/http/server.py index d993161a3e..72a3d67eb6 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -39,8 +39,8 @@ from synapse.api.errors import ( SynapseError, UnrecognizedRequestError, ) +from synapse.logging.context import preserve_fn from synapse.util.caches import intern_dict -from synapse.util.logcontext import preserve_fn logger = logging.getLogger(__name__) diff --git a/synapse/http/site.py b/synapse/http/site.py index 93f679ea48..df5274c177 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -19,7 +19,7 @@ from twisted.web.server import Request, Site from synapse.http import redact_uri from synapse.http.request_metrics import RequestMetrics, requests_counter -from synapse.util.logcontext import LoggingContext, PreserveLoggingContext +from synapse.logging.context import LoggingContext, PreserveLoggingContext logger = logging.getLogger(__name__) diff --git a/synapse/logging/__init__.py b/synapse/logging/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/synapse/logging/context.py b/synapse/logging/context.py new file mode 100644 index 0000000000..30dfa1d6b2 --- /dev/null +++ b/synapse/logging/context.py @@ -0,0 +1,693 @@ +# Copyright 2014-2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" Thread-local-alike tracking of log contexts within synapse + +This module provides objects and utilities for tracking contexts through +synapse code, so that log lines can include a request identifier, and so that +CPU and database activity can be accounted for against the request that caused +them. + +See doc/log_contexts.rst for details on how this works. +""" + +import logging +import threading +import types + +from twisted.internet import defer, threads + +logger = logging.getLogger(__name__) + +try: + import resource + + # Python doesn't ship with a definition of RUSAGE_THREAD but it's defined + # to be 1 on linux so we hard code it. + RUSAGE_THREAD = 1 + + # If the system doesn't support RUSAGE_THREAD then this should throw an + # exception. 
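+ # (If this probe raises, the `except` branch below installs a no-op
+ # fallback and per-request CPU accounting is simply disabled.)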
+ resource.getrusage(RUSAGE_THREAD) + + def get_thread_resource_usage(): + return resource.getrusage(RUSAGE_THREAD) + + +except Exception: + # If the system doesn't support resource.getrusage(RUSAGE_THREAD) then we + # won't track resource usage by returning None. + def get_thread_resource_usage(): + return None + + +# get an id for the current thread. +# +# threading.get_ident doesn't actually return an OS-level tid, and annoyingly, +# on Linux it actually returns the same value either side of a fork() call. However +# we only fork in one place, so it's not worth the hoop-jumping to get a real tid. +# +get_thread_id = threading.get_ident + + +class ContextResourceUsage(object): + """Object for tracking the resources used by a log context + + Attributes: + ru_utime (float): user CPU time (in seconds) + ru_stime (float): system CPU time (in seconds) + db_txn_count (int): number of database transactions done + db_sched_duration_sec (float): amount of time spent waiting for a + database connection + db_txn_duration_sec (float): amount of time spent doing database + transactions (excluding scheduling time) + evt_db_fetch_count (int): number of events requested from the database + """ + + __slots__ = [ + "ru_stime", + "ru_utime", + "db_txn_count", + "db_txn_duration_sec", + "db_sched_duration_sec", + "evt_db_fetch_count", + ] + + def __init__(self, copy_from=None): + """Create a new ContextResourceUsage + + Args: + copy_from (ContextResourceUsage|None): if not None, an object to + copy stats from + """ + if copy_from is None: + self.reset() + else: + self.ru_utime = copy_from.ru_utime + self.ru_stime = copy_from.ru_stime + self.db_txn_count = copy_from.db_txn_count + + self.db_txn_duration_sec = copy_from.db_txn_duration_sec + self.db_sched_duration_sec = copy_from.db_sched_duration_sec + self.evt_db_fetch_count = copy_from.evt_db_fetch_count + + def copy(self): + return ContextResourceUsage(copy_from=self) + + def reset(self): + self.ru_stime = 0.0 + self.ru_utime = 0.0 + self.db_txn_count = 0 + + self.db_txn_duration_sec = 0 + self.db_sched_duration_sec = 0 + self.evt_db_fetch_count = 0 + + def __repr__(self): + return ( + "<ContextResourceUsage ru_stime='%r', ru_utime='%r', " + "db_txn_count='%r', db_txn_duration_sec='%r', " + "db_sched_duration_sec='%r', evt_db_fetch_count='%r'>" + ) % ( + self.ru_stime, + self.ru_utime, + self.db_txn_count, + self.db_txn_duration_sec, + self.db_sched_duration_sec, + self.evt_db_fetch_count, + ) + + def __iadd__(self, other): + """Add another ContextResourceUsage's stats to this one's. + + Args: + other (ContextResourceUsage): the other resource usage object + """ + self.ru_utime += other.ru_utime + self.ru_stime += other.ru_stime + self.db_txn_count += other.db_txn_count + self.db_txn_duration_sec += other.db_txn_duration_sec + self.db_sched_duration_sec += other.db_sched_duration_sec + self.evt_db_fetch_count += other.evt_db_fetch_count + return self + + def __isub__(self, other): + self.ru_utime -= other.ru_utime + self.ru_stime -= other.ru_stime + self.db_txn_count -= other.db_txn_count + self.db_txn_duration_sec -= other.db_txn_duration_sec + self.db_sched_duration_sec -= other.db_sched_duration_sec + self.evt_db_fetch_count -= other.evt_db_fetch_count + return self + + def __add__(self, other): + res = ContextResourceUsage(copy_from=self) + res += other + return res + + def __sub__(self, other): + res = ContextResourceUsage(copy_from=self) + res -= other + return res + + +class LoggingContext(object): + """Additional context for log formatting. Contexts are scoped within a + "with" block.
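A minimal sketch of how the ContextResourceUsage value object defined above composes (illustrative; not part of the patch):

# assume ContextResourceUsage is imported from synapse.logging.context
usage = ContextResourceUsage()
delta = ContextResourceUsage()
delta.db_txn_count = 3
delta.db_txn_duration_sec = 0.25

usage += delta             # __iadd__ folds delta's stats into usage
snapshot = usage.copy()    # independent copy, via the copy_from constructor
assert snapshot.db_txn_count == 3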
+ + If a parent is given when creating a new context, then: + - logging fields are copied from the parent to the new context on entry + - when the new context exits, the cpu usage stats are copied from the + child to the parent + + Args: + name (str): Name for the context for debugging. + parent_context (LoggingContext|None): The parent of the new context + """ + + __slots__ = [ + "previous_context", + "name", + "parent_context", + "_resource_usage", + "usage_start", + "main_thread", + "alive", + "request", + "tag", + ] + + thread_local = threading.local() + + class Sentinel(object): + """Sentinel to represent the root context""" + + __slots__ = [] + + def __str__(self): + return "sentinel" + + def copy_to(self, record): + pass + + def start(self): + pass + + def stop(self): + pass + + def add_database_transaction(self, duration_sec): + pass + + def add_database_scheduled(self, sched_sec): + pass + + def record_event_fetch(self, event_count): + pass + + def __nonzero__(self): + return False + + __bool__ = __nonzero__ # python3 + + sentinel = Sentinel() + + def __init__(self, name=None, parent_context=None, request=None): + self.previous_context = LoggingContext.current_context() + self.name = name + + # track the resources used by this context so far + self._resource_usage = ContextResourceUsage() + + # If alive has the thread resource usage when the logcontext last + # became active. + self.usage_start = None + + self.main_thread = get_thread_id() + self.request = None + self.tag = "" + self.alive = True + + self.parent_context = parent_context + + if self.parent_context is not None: + self.parent_context.copy_to(self) + + if request is not None: + # the request param overrides the request from the parent context + self.request = request + + def __str__(self): + if self.request: + return str(self.request) + return "%s@%x" % (self.name, id(self)) + + @classmethod + def current_context(cls): + """Get the current logging context from thread local storage + + Returns: + LoggingContext: the current logging context + """ + return getattr(cls.thread_local, "current_context", cls.sentinel) + + @classmethod + def set_current_context(cls, context): + """Set the current logging context in thread local storage + Args: + context(LoggingContext): The context to activate. + Returns: + The context that was previously active + """ + current = cls.current_context() + + if current is not context: + current.stop() + cls.thread_local.current_context = context + context.start() + return current + + def __enter__(self): + """Enters this logging context into thread local storage""" + old_context = self.set_current_context(self) + if self.previous_context != old_context: + logger.warn( + "Expected previous context %r, found %r", + self.previous_context, + old_context, + ) + self.alive = True + + return self + + def __exit__(self, type, value, traceback): + """Restore the logging context in thread local storage to the state it + was before this context was entered. + Returns: + None to avoid suppressing any exceptions that were thrown. 
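A minimal sketch of the scoping behaviour described above (illustrative; the request id is made up):

from synapse.logging.context import LoggingContext

with LoggingContext(name="request_handler", request="GET-42"):
    # inside the block this context is active, so LoggingContextFilter
    # can stamp each log record with the request id
    assert str(LoggingContext.current_context()) == "GET-42"
# on exit, the previously active context (often the sentinel) is restored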
+ """ + current = self.set_current_context(self.previous_context) + if current is not self: + if current is self.sentinel: + logger.warning("Expected logging context %s was lost", self) + else: + logger.warning( + "Expected logging context %s but found %s", self, current + ) + self.previous_context = None + self.alive = False + + # if we have a parent, pass our CPU usage stats on + if self.parent_context is not None and hasattr( + self.parent_context, "_resource_usage" + ): + self.parent_context._resource_usage += self._resource_usage + + # reset them in case we get entered again + self._resource_usage.reset() + + def copy_to(self, record): + """Copy logging fields from this context to a log record or + another LoggingContext + """ + + # 'request' is the only field we currently use in the logger, so that's + # all we need to copy + record.request = self.request + + def start(self): + if get_thread_id() != self.main_thread: + logger.warning("Started logcontext %s on different thread", self) + return + + # If we haven't already started record the thread resource usage so + # far + if not self.usage_start: + self.usage_start = get_thread_resource_usage() + + def stop(self): + if get_thread_id() != self.main_thread: + logger.warning("Stopped logcontext %s on different thread", self) + return + + # When we stop, let's record the cpu used since we started + if not self.usage_start: + logger.warning("Called stop on logcontext %s without calling start", self) + return + + utime_delta, stime_delta = self._get_cputime() + self._resource_usage.ru_utime += utime_delta + self._resource_usage.ru_stime += stime_delta + + self.usage_start = None + + def get_resource_usage(self): + """Get resources used by this logcontext so far. + + Returns: + ContextResourceUsage: a *copy* of the object tracking resource + usage so far + """ + # we always return a copy, for consistency + res = self._resource_usage.copy() + + # If we are on the correct thread and we're currently running then we + # can include resource usage so far. + is_main_thread = get_thread_id() == self.main_thread + if self.alive and self.usage_start and is_main_thread: + utime_delta, stime_delta = self._get_cputime() + res.ru_utime += utime_delta + res.ru_stime += stime_delta + + return res + + def _get_cputime(self): + """Get the cpu usage time so far + + Returns: Tuple[float, float]: seconds in user mode, seconds in system mode + """ + current = get_thread_resource_usage() + + utime_delta = current.ru_utime - self.usage_start.ru_utime + stime_delta = current.ru_stime - self.usage_start.ru_stime + + # sanity check + if utime_delta < 0: + logger.error( + "utime went backwards! %f < %f", + current.ru_utime, + self.usage_start.ru_utime, + ) + utime_delta = 0 + + if stime_delta < 0: + logger.error( + "stime went backwards! 
%f < %f", + current.ru_stime, + self.usage_start.ru_stime, + ) + stime_delta = 0 + + return utime_delta, stime_delta + + def add_database_transaction(self, duration_sec): + if duration_sec < 0: + raise ValueError("DB txn time can only be non-negative") + self._resource_usage.db_txn_count += 1 + self._resource_usage.db_txn_duration_sec += duration_sec + + def add_database_scheduled(self, sched_sec): + """Record a use of the database pool + + Args: + sched_sec (float): number of seconds it took us to get a + connection + """ + if sched_sec < 0: + raise ValueError("DB scheduling time can only be non-negative") + self._resource_usage.db_sched_duration_sec += sched_sec + + def record_event_fetch(self, event_count): + """Record a number of events being fetched from the db + + Args: + event_count (int): number of events being fetched + """ + self._resource_usage.evt_db_fetch_count += event_count + + +class LoggingContextFilter(logging.Filter): + """Logging filter that adds values from the current logging context to each + record. + Args: + **defaults: Default values to avoid formatters complaining about + missing fields + """ + + def __init__(self, **defaults): + self.defaults = defaults + + def filter(self, record): + """Add each fields from the logging contexts to the record. + Returns: + True to include the record in the log output. + """ + context = LoggingContext.current_context() + for key, value in self.defaults.items(): + setattr(record, key, value) + + # context should never be None, but if it somehow ends up being, then + # we end up in a death spiral of infinite loops, so let's check, for + # robustness' sake. + if context is not None: + context.copy_to(record) + + return True + + +class PreserveLoggingContext(object): + """Captures the current logging context and restores it when the scope is + exited. Used to restore the context after a function using + @defer.inlineCallbacks is resumed by a callback from the reactor.""" + + __slots__ = ["current_context", "new_context", "has_parent"] + + def __init__(self, new_context=None): + if new_context is None: + new_context = LoggingContext.sentinel + self.new_context = new_context + + def __enter__(self): + """Captures the current logging context""" + self.current_context = LoggingContext.set_current_context(self.new_context) + + if self.current_context: + self.has_parent = self.current_context.previous_context is not None + if not self.current_context.alive: + logger.debug("Entering dead context: %s", self.current_context) + + def __exit__(self, type, value, traceback): + """Restores the current logging context""" + context = LoggingContext.set_current_context(self.current_context) + + if context != self.new_context: + if context is LoggingContext.sentinel: + logger.warning("Expected logging context %s was lost", self.new_context) + else: + logger.warning( + "Expected logging context %s but found %s", + self.new_context, + context, + ) + + if self.current_context is not LoggingContext.sentinel: + if not self.current_context.alive: + logger.debug("Restoring dead context: %s", self.current_context) + + +def nested_logging_context(suffix, parent_context=None): + """Creates a new logging context as a child of another. + + The nested logging context will have a 'request' made up of the parent context's + request, plus the given suffix. + + CPU/db usage stats will be added to the parent context's on exit. + + Normal usage looks like: + + with nested_logging_context(suffix): + # ... 
do stuff + + Args: + suffix (str): suffix to add to the parent context's 'request'. + parent_context (LoggingContext|None): parent context. Will use the current context + if None. + + Returns: + LoggingContext: new logging context. + """ + if parent_context is None: + parent_context = LoggingContext.current_context() + return LoggingContext( + parent_context=parent_context, request=parent_context.request + "-" + suffix + ) + + +def preserve_fn(f): + """Function decorator which wraps the function with run_in_background""" + + def g(*args, **kwargs): + return run_in_background(f, *args, **kwargs) + + return g + + +def run_in_background(f, *args, **kwargs): + """Calls a function, ensuring that the current context is restored after + return from the function, and that the sentinel context is set once the + deferred returned by the function completes. + + Useful for wrapping functions that return a deferred or coroutine, which you don't + yield or await on (for instance because you want to pass it to + deferred.gatherResults()). + + Note that if you completely discard the result, you should make sure that + `f` doesn't raise any deferred exceptions, otherwise a scary-looking + CRITICAL error about an unhandled error will be logged without much + indication about where it came from. + """ + current = LoggingContext.current_context() + try: + res = f(*args, **kwargs) + except: # noqa: E722 + # the assumption here is that the caller doesn't want to be disturbed + # by synchronous exceptions, so let's turn them into Failures. + return defer.fail() + + if isinstance(res, types.CoroutineType): + res = defer.ensureDeferred(res) + + if not isinstance(res, defer.Deferred): + return res + + if res.called and not res.paused: + # The function should have maintained the logcontext, so we can + # optimise out the messing about + return res + + # The function may have reset the context before returning, so + # we need to restore it now. + ctx = LoggingContext.set_current_context(current) + + # The original context will be restored when the deferred + # completes, but there is nothing waiting for it, so it will + # get leaked into the reactor or some other function which + # wasn't expecting it. We therefore need to reset the context + # here. + # + # (If this feels asymmetric, consider it this way: we are + # effectively forking a new thread of execution. We are + # probably currently within a ``with LoggingContext()`` block, + # which is supposed to have a single entry and exit point. But + # by spawning off another deferred, we are effectively + # adding a new exit point.) + res.addBoth(_set_context_cb, ctx) + return res + + +def make_deferred_yieldable(deferred): + """Given a deferred, make it follow the Synapse logcontext rules: + + If the deferred has completed (or is not actually a Deferred), essentially + does nothing (just returns another completed deferred with the + result/failure). + + If the deferred has not yet completed, resets the logcontext before + returning a deferred. Then, when the deferred completes, restores the + current logcontext before running callbacks/errbacks. + + (This is more-or-less the opposite operation to run_in_background.) + """ + if not isinstance(deferred, defer.Deferred): + return deferred + + if deferred.called and not deferred.paused: + # it looks like this deferred is ready to run any callbacks we give it + # immediately. We may as well optimise out the logcontext faffery. 
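A sketch of the fire-then-wait pattern the two helpers above are designed for (illustrative; fetch_a and fetch_b stand in for any deferred-returning functions):

from twisted.internet import defer

from synapse.logging.context import make_deferred_yieldable, run_in_background


@defer.inlineCallbacks
def handle(fetch_a, fetch_b):
    # start both pieces of work without yielding; run_in_background
    # restores our logcontext after each call
    d1 = run_in_background(fetch_a)
    d2 = run_in_background(fetch_b)

    # gatherResults does not itself follow the logcontext rules, so wrap
    # it before yielding on it
    a, b = yield make_deferred_yieldable(
        defer.gatherResults([d1, d2], consumeErrors=True)
    )
    defer.returnValue((a, b))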
+ return deferred + + # ok, we can't be sure that a yield won't block, so let's reset the + # logcontext, and add a callback to the deferred to restore it. + prev_context = LoggingContext.set_current_context(LoggingContext.sentinel) + deferred.addBoth(_set_context_cb, prev_context) + return deferred + + +def _set_context_cb(result, context): + """A callback function which just sets the logging context""" + LoggingContext.set_current_context(context) + return result + + +def defer_to_thread(reactor, f, *args, **kwargs): + """ + Calls the function `f` using a thread from the reactor's default threadpool and + returns the result as a Deferred. + + Creates a new logcontext for `f`, which is created as a child of the current + logcontext (so its CPU usage metrics will get attributed to the current + logcontext). `f` should preserve the logcontext it is given. + + The result deferred follows the Synapse logcontext rules: you should `yield` + on it. + + Args: + reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread + the Deferred will be invoked, and whose threadpool we should use for the + function. + + Normally this will be hs.get_reactor(). + + f (callable): The function to call. + + args: positional arguments to pass to f. + + kwargs: keyword arguments to pass to f. + + Returns: + Deferred: A Deferred which fires a callback with the result of `f`, or an + errback if `f` throws an exception. + """ + return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs) + + +def defer_to_threadpool(reactor, threadpool, f, *args, **kwargs): + """ + A wrapper for twisted.internet.threads.deferToThreadpool, which handles + logcontexts correctly. + + Calls the function `f` using a thread from the given threadpool and returns + the result as a Deferred. + + Creates a new logcontext for `f`, which is created as a child of the current + logcontext (so its CPU usage metrics will get attributed to the current + logcontext). `f` should preserve the logcontext it is given. + + The result deferred follows the Synapse logcontext rules: you should `yield` + on it. + + Args: + reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread + the Deferred will be invoked. Normally this will be hs.get_reactor(). + + threadpool (twisted.python.threadpool.ThreadPool): The threadpool to use for + running `f`. Normally this will be hs.get_reactor().getThreadPool(). + + f (callable): The function to call. + + args: positional arguments to pass to f. + + kwargs: keyword arguments to pass to f. + + Returns: + Deferred: A Deferred which fires a callback with the result of `f`, or an + errback if `f` throws an exception. + """ + logcontext = LoggingContext.current_context() + + def g(): + with LoggingContext(parent_context=logcontext): + return f(*args, **kwargs) + + return make_deferred_yieldable(threads.deferToThreadPool(reactor, threadpool, g)) diff --git a/synapse/logging/formatter.py b/synapse/logging/formatter.py new file mode 100644 index 0000000000..fbf570c756 --- /dev/null +++ b/synapse/logging/formatter.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging +import traceback + +from six import StringIO + + +class LogFormatter(logging.Formatter): + """Log formatter which gives more detail for exceptions + + This is the same as the standard log formatter, except that when logging + exceptions [typically via log.foo("msg", exc_info=1)], it prints the + sequence that led up to the point at which the exception was caught. + (Normally only stack frames between the point the exception was raised and + where it was caught are logged). + """ + + def __init__(self, *args, **kwargs): + super(LogFormatter, self).__init__(*args, **kwargs) + + def formatException(self, ei): + sio = StringIO() + (typ, val, tb) = ei + + # log the stack above the exception capture point if possible, but + # check that we actually have an f_back attribute to work around + # https://twistedmatrix.com/trac/ticket/9305 + + if tb and hasattr(tb.tb_frame, "f_back"): + sio.write("Capture point (most recent call last):\n") + traceback.print_stack(tb.tb_frame.f_back, None, sio) + + traceback.print_exception(typ, val, tb, None, sio) + s = sio.getvalue() + sio.close() + if s[-1:] == "\n": + s = s[:-1] + return s diff --git a/synapse/logging/utils.py b/synapse/logging/utils.py new file mode 100644 index 0000000000..7df0fa6087 --- /dev/null +++ b/synapse/logging/utils.py @@ -0,0 +1,194 @@ +# -*- coding: utf-8 -*- +# Copyright 2014-2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import inspect +import logging +import time +from functools import wraps +from inspect import getcallargs + +from six import PY3 + +_TIME_FUNC_ID = 0 + + +def _log_debug_as_f(f, msg, msg_args): + name = f.__module__ + logger = logging.getLogger(name) + + if logger.isEnabledFor(logging.DEBUG): + if PY3: + lineno = f.__code__.co_firstlineno + pathname = f.__code__.co_filename + else: + lineno = f.func_code.co_firstlineno + pathname = f.func_code.co_filename + + record = logging.LogRecord( + name=name, + level=logging.DEBUG, + pathname=pathname, + lineno=lineno, + msg=msg, + args=msg_args, + exc_info=None, + ) + + logger.handle(record) + + +def log_function(f): + """ Function decorator that logs every call to that function. + """ + func_name = f.__name__ + + @wraps(f) + def wrapped(*args, **kwargs): + name = f.__module__ + logger = logging.getLogger(name) + level = logging.DEBUG + + if logger.isEnabledFor(level): + bound_args = getcallargs(f, *args, **kwargs) + + def format(value): + r = str(value) + if len(r) > 50: + r = r[:50] + "..." 
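+ # (capping each repr at 50 characters keeps the DEBUG line bounded
+ # even when the decorated function is passed large arguments)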
+ return r + + func_args = ["%s=%s" % (k, format(v)) for k, v in bound_args.items()] + + msg_args = {"func_name": func_name, "args": ", ".join(func_args)} + + _log_debug_as_f(f, "Invoked '%(func_name)s' with args: %(args)s", msg_args) + + return f(*args, **kwargs) + + wrapped.__name__ = func_name + return wrapped + + +def time_function(f): + func_name = f.__name__ + + @wraps(f) + def wrapped(*args, **kwargs): + global _TIME_FUNC_ID + id = _TIME_FUNC_ID + _TIME_FUNC_ID += 1 + + start = time.clock() + + try: + _log_debug_as_f(f, "[FUNC START] {%s-%d}", (func_name, id)) + + r = f(*args, **kwargs) + finally: + end = time.clock() + _log_debug_as_f( + f, "[FUNC END] {%s-%d} %.3f sec", (func_name, id, end - start) + ) + + return r + + return wrapped + + +def trace_function(f): + func_name = f.__name__ + linenum = f.func_code.co_firstlineno + pathname = f.func_code.co_filename + + @wraps(f) + def wrapped(*args, **kwargs): + name = f.__module__ + logger = logging.getLogger(name) + level = logging.DEBUG + + s = inspect.currentframe().f_back + + to_print = [ + "\t%s:%s %s. Args: args=%s, kwargs=%s" + % (pathname, linenum, func_name, args, kwargs) + ] + while s: + if True or s.f_globals["__name__"].startswith("synapse"): + filename, lineno, function, _, _ = inspect.getframeinfo(s) + args_string = inspect.formatargvalues(*inspect.getargvalues(s)) + + to_print.append( + "\t%s:%d %s. Args: %s" % (filename, lineno, function, args_string) + ) + + s = s.f_back + + msg = "\nTraceback for %s:\n" % (func_name,) + "\n".join(to_print) + + record = logging.LogRecord( + name=name, + level=level, + pathname=pathname, + lineno=lineno, + msg=msg, + args=None, + exc_info=None, + ) + + logger.handle(record) + + return f(*args, **kwargs) + + wrapped.__name__ = func_name + return wrapped + + +def get_previous_frames(): + s = inspect.currentframe().f_back.f_back + to_return = [] + while s: + if s.f_globals["__name__"].startswith("synapse"): + filename, lineno, function, _, _ = inspect.getframeinfo(s) + args_string = inspect.formatargvalues(*inspect.getargvalues(s)) + + to_return.append( + "{{ %s:%d %s - Args: %s }}" % (filename, lineno, function, args_string) + ) + + s = s.f_back + + return ", ".join(to_return) + + +def get_previous_frame(ignore=[]): + s = inspect.currentframe().f_back.f_back + + while s: + if s.f_globals["__name__"].startswith("synapse"): + if not any(s.f_globals["__name__"].startswith(ig) for ig in ignore): + filename, lineno, function, _, _ = inspect.getframeinfo(s) + args_string = inspect.formatargvalues(*inspect.getargvalues(s)) + + return "{{ %s:%d %s - Args: %s }}" % ( + filename, + lineno, + function, + args_string, + ) + + s = s.f_back + + return None diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 167e2c068a..edd6b42db3 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -22,7 +22,7 @@ from prometheus_client.core import REGISTRY, Counter, GaugeMetricFamily from twisted.internet import defer -from synapse.util.logcontext import LoggingContext, PreserveLoggingContext +from synapse.logging.context import LoggingContext, PreserveLoggingContext logger = logging.getLogger(__name__) diff --git a/synapse/notifier.py b/synapse/notifier.py index d398078eed..918ef64897 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -23,12 +23,12 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError from 
synapse.handlers.presence import format_user_presence_state +from synapse.logging.context import PreserveLoggingContext +from synapse.logging.utils import log_function from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import StreamToken from synapse.util.async_helpers import ObservableDeferred, timeout_deferred -from synapse.util.logcontext import PreserveLoggingContext -from synapse.util.logutils import log_function from synapse.util.metrics import Measure from synapse.visibility import filter_events_for_client diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 809199fe88..521c6e2cd7 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -29,6 +29,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes from synapse.api.errors import StoreError +from synapse.logging.context import make_deferred_yieldable from synapse.push.presentable_names import ( calculate_room_name, descriptor_from_member_events, @@ -36,7 +37,6 @@ from synapse.push.presentable_names import ( ) from synapse.types import UserID from synapse.util.async_helpers import concurrently_execute -from synapse.util.logcontext import make_deferred_yieldable from synapse.visibility import filter_events_for_client logger = logging.getLogger(__name__) diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 97efb835ad..5ffdf2675d 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -62,9 +62,9 @@ from twisted.internet import defer from twisted.protocols.basic import LineOnlyReceiver from twisted.python.failure import Failure +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.util.stringutils import random_string from .commands import ( diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py index 36404b797d..6da71dc46f 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py @@ -17,8 +17,8 @@ to ensure idempotency when performing PUTs using the REST API.""" import logging +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.util.async_helpers import ObservableDeferred -from synapse.util.logcontext import make_deferred_yieldable, run_in_background logger = logging.getLogger(__name__) diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index 3318638d3e..5fefee4dde 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -25,7 +25,7 @@ from twisted.protocols.basic import FileSender from synapse.api.errors import Codes, SynapseError, cs_error from synapse.http.server import finish_request, respond_with_json -from synapse.util import logcontext +from synapse.logging.context import make_deferred_yieldable from synapse.util.stringutils import is_ascii logger = logging.getLogger(__name__) @@ -75,9 +75,7 @@ def respond_with_file(request, media_type, file_path, file_size=None, upload_nam add_file_headers(request, media_type, file_size, upload_name) with open(file_path, "rb") as f: - yield logcontext.make_deferred_yieldable( - FileSender().beginFileTransfer(f, request) - ) + yield 
make_deferred_yieldable(FileSender().beginFileTransfer(f, request)) finish_request(request) else: diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index df3d985a38..65afffbb42 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -33,8 +33,8 @@ from synapse.api.errors import ( RequestSendFailed, SynapseError, ) +from synapse.logging.context import defer_to_thread from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.util import logcontext from synapse.util.async_helpers import Linearizer from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import random_string @@ -463,7 +463,7 @@ class MediaRepository(object): ) thumbnailer = Thumbnailer(input_path) - t_byte_source = yield logcontext.defer_to_thread( + t_byte_source = yield defer_to_thread( self.hs.get_reactor(), self._generate_thumbnail, thumbnailer, @@ -511,7 +511,7 @@ class MediaRepository(object): ) thumbnailer = Thumbnailer(input_path) - t_byte_source = yield logcontext.defer_to_thread( + t_byte_source = yield defer_to_thread( self.hs.get_reactor(), self._generate_thumbnail, thumbnailer, @@ -596,7 +596,7 @@ class MediaRepository(object): return if thumbnailer.transpose_method is not None: - m_width, m_height = yield logcontext.defer_to_thread( + m_width, m_height = yield defer_to_thread( self.hs.get_reactor(), thumbnailer.transpose ) @@ -616,11 +616,11 @@ class MediaRepository(object): for (t_width, t_height, t_type), t_method in iteritems(thumbnails): # Generate the thumbnail if t_method == "crop": - t_byte_source = yield logcontext.defer_to_thread( + t_byte_source = yield defer_to_thread( self.hs.get_reactor(), thumbnailer.crop, t_width, t_height, t_type ) elif t_method == "scale": - t_byte_source = yield logcontext.defer_to_thread( + t_byte_source = yield defer_to_thread( self.hs.get_reactor(), thumbnailer.scale, t_width, t_height, t_type ) else: diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index eff86836fb..25e5ac2848 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -24,9 +24,8 @@ import six from twisted.internet import defer from twisted.protocols.basic import FileSender -from synapse.util import logcontext +from synapse.logging.context import defer_to_thread, make_deferred_yieldable from synapse.util.file_consumer import BackgroundFileConsumer -from synapse.util.logcontext import make_deferred_yieldable from ._base import Responder @@ -65,7 +64,7 @@ class MediaStorage(object): with self.store_into_file(file_info) as (f, fname, finish_cb): # Write to the main repository - yield logcontext.defer_to_thread( + yield defer_to_thread( self.hs.get_reactor(), _write_file_synchronously, source, f ) yield finish_cb() diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 053346fb86..5871737bfd 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -42,11 +42,11 @@ from synapse.http.server import ( wrap_json_request_handler, ) from synapse.http.servlet import parse_integer, parse_string +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.metrics.background_process_metrics import run_as_background_process from synapse.rest.media.v1._base import get_filename_from_headers from synapse.util.async_helpers import 
ObservableDeferred from synapse.util.caches.expiringcache import ExpiringCache -from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.util.stringutils import random_string from ._base import FileInfo diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py index 359b45ebfc..e8f559acc1 100644 --- a/synapse/rest/media/v1/storage_provider.py +++ b/synapse/rest/media/v1/storage_provider.py @@ -20,8 +20,7 @@ import shutil from twisted.internet import defer from synapse.config._base import Config -from synapse.util import logcontext -from synapse.util.logcontext import run_in_background +from synapse.logging.context import defer_to_thread, run_in_background from .media_storage import FileResponder @@ -125,7 +124,7 @@ class FileStorageProviderBackend(StorageProvider): if not os.path.exists(dirname): os.makedirs(dirname) - return logcontext.defer_to_thread( + return defer_to_thread( self.hs.get_reactor(), shutil.copyfile, primary_fname, backup_fname ) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 1b454a56a1..9f708fa205 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -28,11 +28,11 @@ from twisted.internet import defer from synapse.api.constants import EventTypes from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, StateResolutionVersions from synapse.events.snapshot import EventContext +from synapse.logging.utils import log_function from synapse.state import v1, v2 from synapse.util.async_helpers import Linearizer from synapse.util.caches import get_cache_factor_for from synapse.util.caches.expiringcache import ExpiringCache -from synapse.util.logutils import log_function from synapse.util.metrics import Measure logger = logging.getLogger(__name__) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 29589853c6..2f940dbae6 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -30,12 +30,12 @@ from prometheus_client import Histogram from twisted.internet import defer from synapse.api.errors import StoreError +from synapse.logging.context import LoggingContext, PreserveLoggingContext from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.engines import PostgresEngine, Sqlite3Engine from synapse.types import get_domain_from_id from synapse.util import batch_iter from synapse.util.caches.descriptors import Cache -from synapse.util.logcontext import LoggingContext, PreserveLoggingContext from synapse.util.stringutils import exception_to_unicode # import a function which will return a monotonic time, in seconds diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 86f8485704..b486ca50eb 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -33,6 +33,8 @@ from synapse.api.constants import EventTypes from synapse.api.errors import SynapseError from synapse.events import EventBase # noqa: F401 from synapse.events.snapshot import EventContext # noqa: F401 +from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable +from synapse.logging.utils import log_function from synapse.metrics import BucketCollector from synapse.metrics.background_process_metrics import run_as_background_process from synapse.state import StateResolutionStore @@ -45,8 +47,6 @@ from synapse.util import batch_iter from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.descriptors import cached, cachedInlineCallbacks from 
synapse.util.frozenutils import frozendict_json_encoder -from synapse.util.logcontext import PreserveLoggingContext, make_deferred_yieldable -from synapse.util.logutils import log_function from synapse.util.metrics import Measure logger = logging.getLogger(__name__) diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 6d680d405a..09db872511 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -29,14 +29,14 @@ from synapse.api.room_versions import EventFormatVersions from synapse.events import FrozenEvent, event_type_from_format_version # noqa: F401 from synapse.events.snapshot import EventContext # noqa: F401 from synapse.events.utils import prune_event -from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.types import get_domain_from_id -from synapse.util.logcontext import ( +from synapse.logging.context import ( LoggingContext, PreserveLoggingContext, make_deferred_yieldable, run_in_background, ) +from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.types import get_domain_from_id from synapse.util.metrics import Measure from ._base import SQLBaseStore diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index d9482a3843..386a9dbe14 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -41,12 +41,12 @@ from six.moves import range from twisted.internet import defer +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.storage._base import SQLBaseStore from synapse.storage.engines import PostgresEngine from synapse.storage.events_worker import EventsWorkerStore from synapse.types import RoomStreamToken from synapse.util.caches.stream_change_cache import StreamChangeCache -from synapse.util.logcontext import make_deferred_yieldable, run_in_background logger = logging.getLogger(__name__) diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 954e32fb2a..c6d2ce4404 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -21,10 +21,14 @@ import attr from twisted.internet import defer, task -from synapse.util.logcontext import PreserveLoggingContext +from synapse.logging import context, formatter logger = logging.getLogger(__name__) +# Compatibility alias, for existing logconfigs. +logcontext = context +logformatter = formatter + def unwrapFirstError(failure): # defer.gatherResults and DeferredLists wrap failures. 
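A quick sketch of what the compatibility aliases above guarantee (illustrative):

import synapse.util
from synapse.logging import context

# existing log configs may still reference the old dotted paths, e.g.
# "synapse.util.logformatter.LogFormatter"; the aliases keep them valid
assert synapse.util.logcontext is context
assert synapse.util.logcontext.LoggingContext is context.LoggingContext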
@@ -46,7 +50,7 @@ class Clock(object): @defer.inlineCallbacks def sleep(self, seconds): d = defer.Deferred() - with PreserveLoggingContext(): + with context.PreserveLoggingContext(): self._reactor.callLater(seconds, d.callback, seconds) res = yield d defer.returnValue(res) @@ -91,10 +95,10 @@ class Clock(object): """ def wrapped_callback(*args, **kwargs): - with PreserveLoggingContext(): + with context.PreserveLoggingContext(): callback(*args, **kwargs) - with PreserveLoggingContext(): + with context.PreserveLoggingContext(): return self._reactor.callLater(delay, wrapped_callback, *args, **kwargs) def cancel_call_later(self, timer, ignore_errs=False): diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 7757b8708a..58a6b8764f 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -23,13 +23,12 @@ from twisted.internet import defer from twisted.internet.defer import CancelledError from twisted.python import failure -from synapse.util import Clock, logcontext, unwrapFirstError - -from .logcontext import ( +from synapse.logging.context import ( PreserveLoggingContext, make_deferred_yieldable, run_in_background, ) +from synapse.util import Clock, unwrapFirstError logger = logging.getLogger(__name__) @@ -153,7 +152,7 @@ def concurrently_execute(func, args, limit): except StopIteration: pass - return logcontext.make_deferred_yieldable( + return make_deferred_yieldable( defer.gatherResults( [run_in_background(_concurrently_execute_inner) for _ in range(limit)], consumeErrors=True, @@ -174,7 +173,7 @@ def yieldable_gather_results(func, iter, *args, **kwargs): Deferred[list]: Resolved when all functions have been invoked, or errors if one of the function calls fails. """ - return logcontext.make_deferred_yieldable( + return make_deferred_yieldable( defer.gatherResults( [run_in_background(func, item, *args, **kwargs) for item in iter], consumeErrors=True, diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index d2f25063aa..675db2f448 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -24,7 +24,8 @@ from six import itervalues, string_types from twisted.internet import defer -from synapse.util import logcontext, unwrapFirstError +from synapse.logging.context import make_deferred_yieldable, preserve_fn +from synapse.util import unwrapFirstError from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches import get_cache_factor_for from synapse.util.caches.lrucache import LruCache @@ -388,7 +389,7 @@ class CacheDescriptor(_CacheDescriptorBase): except KeyError: ret = defer.maybeDeferred( - logcontext.preserve_fn(self.function_to_call), obj, *args, **kwargs + preserve_fn(self.function_to_call), obj, *args, **kwargs ) def onErr(f): @@ -408,7 +409,7 @@ class CacheDescriptor(_CacheDescriptorBase): observer = result_d.observe() if isinstance(observer, defer.Deferred): - return logcontext.make_deferred_yieldable(observer) + return make_deferred_yieldable(observer) else: return observer @@ -563,7 +564,7 @@ class CacheListDescriptor(_CacheDescriptorBase): cached_defers.append( defer.maybeDeferred( - logcontext.preserve_fn(self.function_to_call), **args_to_call + preserve_fn(self.function_to_call), **args_to_call ).addCallbacks(complete_all, errback) ) @@ -571,7 +572,7 @@ class CacheListDescriptor(_CacheDescriptorBase): d = defer.gatherResults(cached_defers, consumeErrors=True).addCallbacks( lambda _: results, unwrapFirstError ) - return 
logcontext.make_deferred_yieldable(d) + return make_deferred_yieldable(d) else: return results diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index cbe54d45dd..d6908e169d 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -16,9 +16,9 @@ import logging from twisted.internet import defer +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches import register_cache -from synapse.util.logcontext import make_deferred_yieldable, run_in_background logger = logging.getLogger(__name__) @@ -78,7 +78,7 @@ class ResponseCache(object): *deferred* should run its callbacks in the sentinel logcontext (ie, you should wrap normal synapse deferreds with - logcontext.run_in_background). + synapse.logging.context.run_in_background). Can return either a new Deferred (which also doesn't follow the synapse logcontext rules), or, if *deferred* was already complete, the actual diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py index 5a79db821c..45af8d3eeb 100644 --- a/synapse/util/distributor.py +++ b/synapse/util/distributor.py @@ -17,8 +17,8 @@ import logging from twisted.internet import defer +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.util.logcontext import make_deferred_yieldable, run_in_background logger = logging.getLogger(__name__) diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py index 629ed44149..8b17d1c8b8 100644 --- a/synapse/util/file_consumer.py +++ b/synapse/util/file_consumer.py @@ -17,7 +17,7 @@ from six.moves import queue from twisted.internet import threads -from synapse.util.logcontext import make_deferred_yieldable, run_in_background +from synapse.logging.context import make_deferred_yieldable, run_in_background class BackgroundFileConsumer(object): diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py deleted file mode 100644 index 30dfa1d6b2..0000000000 --- a/synapse/util/logcontext.py +++ /dev/null @@ -1,693 +0,0 @@ -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" Thread-local-alike tracking of log contexts within synapse - -This module provides objects and utilities for tracking contexts through -synapse code, so that log lines can include a request identifier, and so that -CPU and database activity can be accounted for against the request that caused -them. - -See doc/log_contexts.rst for details on how this works. -""" - -import logging -import threading -import types - -from twisted.internet import defer, threads - -logger = logging.getLogger(__name__) - -try: - import resource - - # Python doesn't ship with a definition of RUSAGE_THREAD but it's defined - # to be 1 on linux so we hard code it. 
- RUSAGE_THREAD = 1 - - # If the system doesn't support RUSAGE_THREAD then this should throw an - # exception. - resource.getrusage(RUSAGE_THREAD) - - def get_thread_resource_usage(): - return resource.getrusage(RUSAGE_THREAD) - - -except Exception: - # If the system doesn't support resource.getrusage(RUSAGE_THREAD) then we - # won't track resource usage by returning None. - def get_thread_resource_usage(): - return None - - -# get an id for the current thread. -# -# threading.get_ident doesn't actually return an OS-level tid, and annoyingly, -# on Linux it actually returns the same value either side of a fork() call. However -# we only fork in one place, so it's not worth the hoop-jumping to get a real tid. -# -get_thread_id = threading.get_ident - - -class ContextResourceUsage(object): - """Object for tracking the resources used by a log context - - Attributes: - ru_utime (float): user CPU time (in seconds) - ru_stime (float): system CPU time (in seconds) - db_txn_count (int): number of database transactions done - db_sched_duration_sec (float): amount of time spent waiting for a - database connection - db_txn_duration_sec (float): amount of time spent doing database - transactions (excluding scheduling time) - evt_db_fetch_count (int): number of events requested from the database - """ - - __slots__ = [ - "ru_stime", - "ru_utime", - "db_txn_count", - "db_txn_duration_sec", - "db_sched_duration_sec", - "evt_db_fetch_count", - ] - - def __init__(self, copy_from=None): - """Create a new ContextResourceUsage - - Args: - copy_from (ContextResourceUsage|None): if not None, an object to - copy stats from - """ - if copy_from is None: - self.reset() - else: - self.ru_utime = copy_from.ru_utime - self.ru_stime = copy_from.ru_stime - self.db_txn_count = copy_from.db_txn_count - - self.db_txn_duration_sec = copy_from.db_txn_duration_sec - self.db_sched_duration_sec = copy_from.db_sched_duration_sec - self.evt_db_fetch_count = copy_from.evt_db_fetch_count - - def copy(self): - return ContextResourceUsage(copy_from=self) - - def reset(self): - self.ru_stime = 0.0 - self.ru_utime = 0.0 - self.db_txn_count = 0 - - self.db_txn_duration_sec = 0 - self.db_sched_duration_sec = 0 - self.evt_db_fetch_count = 0 - - def __repr__(self): - return ( - "<ContextResourceUsage ru_stime='%r', ru_utime='%r', " - "db_txn_count='%r', db_txn_duration_sec='%r', " - "db_sched_duration_sec='%r', evt_db_fetch_count='%r'>" - ) % ( - self.ru_stime, - self.ru_utime, - self.db_txn_count, - self.db_txn_duration_sec, - self.db_sched_duration_sec, - self.evt_db_fetch_count, - ) - - def __iadd__(self, other): - """Add another ContextResourceUsage's stats to this one's. - - Args: - other (ContextResourceUsage): the other resource usage object - """ - self.ru_utime += other.ru_utime - self.ru_stime += other.ru_stime - self.db_txn_count += other.db_txn_count - self.db_txn_duration_sec += other.db_txn_duration_sec - self.db_sched_duration_sec += other.db_sched_duration_sec - self.evt_db_fetch_count += other.evt_db_fetch_count - return self - - def __isub__(self, other): - self.ru_utime -= other.ru_utime - self.ru_stime -= other.ru_stime - self.db_txn_count -= other.db_txn_count - self.db_txn_duration_sec -= other.db_txn_duration_sec - self.db_sched_duration_sec -= other.db_sched_duration_sec - self.evt_db_fetch_count -= other.evt_db_fetch_count - return self - - def __add__(self, other): - res = ContextResourceUsage(copy_from=self) - res += other - return res - - def __sub__(self, other): - res = ContextResourceUsage(copy_from=self) - res -= other - return res - - -class LoggingContext(object): - """Additional context for log formatting. Contexts are scoped within a - "with" block.
- - If a parent is given when creating a new context, then: - - logging fields are copied from the parent to the new context on entry - - when the new context exits, the cpu usage stats are copied from the - child to the parent - - Args: - name (str): Name for the context for debugging. - parent_context (LoggingContext|None): The parent of the new context - """ - - __slots__ = [ - "previous_context", - "name", - "parent_context", - "_resource_usage", - "usage_start", - "main_thread", - "alive", - "request", - "tag", - ] - - thread_local = threading.local() - - class Sentinel(object): - """Sentinel to represent the root context""" - - __slots__ = [] - - def __str__(self): - return "sentinel" - - def copy_to(self, record): - pass - - def start(self): - pass - - def stop(self): - pass - - def add_database_transaction(self, duration_sec): - pass - - def add_database_scheduled(self, sched_sec): - pass - - def record_event_fetch(self, event_count): - pass - - def __nonzero__(self): - return False - - __bool__ = __nonzero__ # python3 - - sentinel = Sentinel() - - def __init__(self, name=None, parent_context=None, request=None): - self.previous_context = LoggingContext.current_context() - self.name = name - - # track the resources used by this context so far - self._resource_usage = ContextResourceUsage() - - # If alive has the thread resource usage when the logcontext last - # became active. - self.usage_start = None - - self.main_thread = get_thread_id() - self.request = None - self.tag = "" - self.alive = True - - self.parent_context = parent_context - - if self.parent_context is not None: - self.parent_context.copy_to(self) - - if request is not None: - # the request param overrides the request from the parent context - self.request = request - - def __str__(self): - if self.request: - return str(self.request) - return "%s@%x" % (self.name, id(self)) - - @classmethod - def current_context(cls): - """Get the current logging context from thread local storage - - Returns: - LoggingContext: the current logging context - """ - return getattr(cls.thread_local, "current_context", cls.sentinel) - - @classmethod - def set_current_context(cls, context): - """Set the current logging context in thread local storage - Args: - context(LoggingContext): The context to activate. - Returns: - The context that was previously active - """ - current = cls.current_context() - - if current is not context: - current.stop() - cls.thread_local.current_context = context - context.start() - return current - - def __enter__(self): - """Enters this logging context into thread local storage""" - old_context = self.set_current_context(self) - if self.previous_context != old_context: - logger.warn( - "Expected previous context %r, found %r", - self.previous_context, - old_context, - ) - self.alive = True - - return self - - def __exit__(self, type, value, traceback): - """Restore the logging context in thread local storage to the state it - was before this context was entered. - Returns: - None to avoid suppressing any exceptions that were thrown. 
- """ - current = self.set_current_context(self.previous_context) - if current is not self: - if current is self.sentinel: - logger.warning("Expected logging context %s was lost", self) - else: - logger.warning( - "Expected logging context %s but found %s", self, current - ) - self.previous_context = None - self.alive = False - - # if we have a parent, pass our CPU usage stats on - if self.parent_context is not None and hasattr( - self.parent_context, "_resource_usage" - ): - self.parent_context._resource_usage += self._resource_usage - - # reset them in case we get entered again - self._resource_usage.reset() - - def copy_to(self, record): - """Copy logging fields from this context to a log record or - another LoggingContext - """ - - # 'request' is the only field we currently use in the logger, so that's - # all we need to copy - record.request = self.request - - def start(self): - if get_thread_id() != self.main_thread: - logger.warning("Started logcontext %s on different thread", self) - return - - # If we haven't already started record the thread resource usage so - # far - if not self.usage_start: - self.usage_start = get_thread_resource_usage() - - def stop(self): - if get_thread_id() != self.main_thread: - logger.warning("Stopped logcontext %s on different thread", self) - return - - # When we stop, let's record the cpu used since we started - if not self.usage_start: - logger.warning("Called stop on logcontext %s without calling start", self) - return - - utime_delta, stime_delta = self._get_cputime() - self._resource_usage.ru_utime += utime_delta - self._resource_usage.ru_stime += stime_delta - - self.usage_start = None - - def get_resource_usage(self): - """Get resources used by this logcontext so far. - - Returns: - ContextResourceUsage: a *copy* of the object tracking resource - usage so far - """ - # we always return a copy, for consistency - res = self._resource_usage.copy() - - # If we are on the correct thread and we're currently running then we - # can include resource usage so far. - is_main_thread = get_thread_id() == self.main_thread - if self.alive and self.usage_start and is_main_thread: - utime_delta, stime_delta = self._get_cputime() - res.ru_utime += utime_delta - res.ru_stime += stime_delta - - return res - - def _get_cputime(self): - """Get the cpu usage time so far - - Returns: Tuple[float, float]: seconds in user mode, seconds in system mode - """ - current = get_thread_resource_usage() - - utime_delta = current.ru_utime - self.usage_start.ru_utime - stime_delta = current.ru_stime - self.usage_start.ru_stime - - # sanity check - if utime_delta < 0: - logger.error( - "utime went backwards! %f < %f", - current.ru_utime, - self.usage_start.ru_utime, - ) - utime_delta = 0 - - if stime_delta < 0: - logger.error( - "stime went backwards! 
%f < %f", - current.ru_stime, - self.usage_start.ru_stime, - ) - stime_delta = 0 - - return utime_delta, stime_delta - - def add_database_transaction(self, duration_sec): - if duration_sec < 0: - raise ValueError("DB txn time can only be non-negative") - self._resource_usage.db_txn_count += 1 - self._resource_usage.db_txn_duration_sec += duration_sec - - def add_database_scheduled(self, sched_sec): - """Record a use of the database pool - - Args: - sched_sec (float): number of seconds it took us to get a - connection - """ - if sched_sec < 0: - raise ValueError("DB scheduling time can only be non-negative") - self._resource_usage.db_sched_duration_sec += sched_sec - - def record_event_fetch(self, event_count): - """Record a number of events being fetched from the db - - Args: - event_count (int): number of events being fetched - """ - self._resource_usage.evt_db_fetch_count += event_count - - -class LoggingContextFilter(logging.Filter): - """Logging filter that adds values from the current logging context to each - record. - Args: - **defaults: Default values to avoid formatters complaining about - missing fields - """ - - def __init__(self, **defaults): - self.defaults = defaults - - def filter(self, record): - """Add each fields from the logging contexts to the record. - Returns: - True to include the record in the log output. - """ - context = LoggingContext.current_context() - for key, value in self.defaults.items(): - setattr(record, key, value) - - # context should never be None, but if it somehow ends up being, then - # we end up in a death spiral of infinite loops, so let's check, for - # robustness' sake. - if context is not None: - context.copy_to(record) - - return True - - -class PreserveLoggingContext(object): - """Captures the current logging context and restores it when the scope is - exited. Used to restore the context after a function using - @defer.inlineCallbacks is resumed by a callback from the reactor.""" - - __slots__ = ["current_context", "new_context", "has_parent"] - - def __init__(self, new_context=None): - if new_context is None: - new_context = LoggingContext.sentinel - self.new_context = new_context - - def __enter__(self): - """Captures the current logging context""" - self.current_context = LoggingContext.set_current_context(self.new_context) - - if self.current_context: - self.has_parent = self.current_context.previous_context is not None - if not self.current_context.alive: - logger.debug("Entering dead context: %s", self.current_context) - - def __exit__(self, type, value, traceback): - """Restores the current logging context""" - context = LoggingContext.set_current_context(self.current_context) - - if context != self.new_context: - if context is LoggingContext.sentinel: - logger.warning("Expected logging context %s was lost", self.new_context) - else: - logger.warning( - "Expected logging context %s but found %s", - self.new_context, - context, - ) - - if self.current_context is not LoggingContext.sentinel: - if not self.current_context.alive: - logger.debug("Restoring dead context: %s", self.current_context) - - -def nested_logging_context(suffix, parent_context=None): - """Creates a new logging context as a child of another. - - The nested logging context will have a 'request' made up of the parent context's - request, plus the given suffix. - - CPU/db usage stats will be added to the parent context's on exit. - - Normal usage looks like: - - with nested_logging_context(suffix): - # ... 
do stuff - - Args: - suffix (str): suffix to add to the parent context's 'request'. - parent_context (LoggingContext|None): parent context. Will use the current context - if None. - - Returns: - LoggingContext: new logging context. - """ - if parent_context is None: - parent_context = LoggingContext.current_context() - return LoggingContext( - parent_context=parent_context, request=parent_context.request + "-" + suffix - ) - - -def preserve_fn(f): - """Function decorator which wraps the function with run_in_background""" - - def g(*args, **kwargs): - return run_in_background(f, *args, **kwargs) - - return g - - -def run_in_background(f, *args, **kwargs): - """Calls a function, ensuring that the current context is restored after - return from the function, and that the sentinel context is set once the - deferred returned by the function completes. - - Useful for wrapping functions that return a deferred or coroutine, which you don't - yield or await on (for instance because you want to pass it to - deferred.gatherResults()). - - Note that if you completely discard the result, you should make sure that - `f` doesn't raise any deferred exceptions, otherwise a scary-looking - CRITICAL error about an unhandled error will be logged without much - indication about where it came from. - """ - current = LoggingContext.current_context() - try: - res = f(*args, **kwargs) - except: # noqa: E722 - # the assumption here is that the caller doesn't want to be disturbed - # by synchronous exceptions, so let's turn them into Failures. - return defer.fail() - - if isinstance(res, types.CoroutineType): - res = defer.ensureDeferred(res) - - if not isinstance(res, defer.Deferred): - return res - - if res.called and not res.paused: - # The function should have maintained the logcontext, so we can - # optimise out the messing about - return res - - # The function may have reset the context before returning, so - # we need to restore it now. - ctx = LoggingContext.set_current_context(current) - - # The original context will be restored when the deferred - # completes, but there is nothing waiting for it, so it will - # get leaked into the reactor or some other function which - # wasn't expecting it. We therefore need to reset the context - # here. - # - # (If this feels asymmetric, consider it this way: we are - # effectively forking a new thread of execution. We are - # probably currently within a ``with LoggingContext()`` block, - # which is supposed to have a single entry and exit point. But - # by spawning off another deferred, we are effectively - # adding a new exit point.) - res.addBoth(_set_context_cb, ctx) - return res - - -def make_deferred_yieldable(deferred): - """Given a deferred, make it follow the Synapse logcontext rules: - - If the deferred has completed (or is not actually a Deferred), essentially - does nothing (just returns another completed deferred with the - result/failure). - - If the deferred has not yet completed, resets the logcontext before - returning a deferred. Then, when the deferred completes, restores the - current logcontext before running callbacks/errbacks. - - (This is more-or-less the opposite operation to run_in_background.) - """ - if not isinstance(deferred, defer.Deferred): - return deferred - - if deferred.called and not deferred.paused: - # it looks like this deferred is ready to run any callbacks we give it - # immediately. We may as well optimise out the logcontext faffery. 
- return deferred - - # ok, we can't be sure that a yield won't block, so let's reset the - # logcontext, and add a callback to the deferred to restore it. - prev_context = LoggingContext.set_current_context(LoggingContext.sentinel) - deferred.addBoth(_set_context_cb, prev_context) - return deferred - - -def _set_context_cb(result, context): - """A callback function which just sets the logging context""" - LoggingContext.set_current_context(context) - return result - - -def defer_to_thread(reactor, f, *args, **kwargs): - """ - Calls the function `f` using a thread from the reactor's default threadpool and - returns the result as a Deferred. - - Creates a new logcontext for `f`, which is created as a child of the current - logcontext (so its CPU usage metrics will get attributed to the current - logcontext). `f` should preserve the logcontext it is given. - - The result deferred follows the Synapse logcontext rules: you should `yield` - on it. - - Args: - reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread - the Deferred will be invoked, and whose threadpool we should use for the - function. - - Normally this will be hs.get_reactor(). - - f (callable): The function to call. - - args: positional arguments to pass to f. - - kwargs: keyword arguments to pass to f. - - Returns: - Deferred: A Deferred which fires a callback with the result of `f`, or an - errback if `f` throws an exception. - """ - return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs) - - -def defer_to_threadpool(reactor, threadpool, f, *args, **kwargs): - """ - A wrapper for twisted.internet.threads.deferToThreadpool, which handles - logcontexts correctly. - - Calls the function `f` using a thread from the given threadpool and returns - the result as a Deferred. - - Creates a new logcontext for `f`, which is created as a child of the current - logcontext (so its CPU usage metrics will get attributed to the current - logcontext). `f` should preserve the logcontext it is given. - - The result deferred follows the Synapse logcontext rules: you should `yield` - on it. - - Args: - reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread - the Deferred will be invoked. Normally this will be hs.get_reactor(). - - threadpool (twisted.python.threadpool.ThreadPool): The threadpool to use for - running `f`. Normally this will be hs.get_reactor().getThreadPool(). - - f (callable): The function to call. - - args: positional arguments to pass to f. - - kwargs: keyword arguments to pass to f. - - Returns: - Deferred: A Deferred which fires a callback with the result of `f`, or an - errback if `f` throws an exception. - """ - logcontext = LoggingContext.current_context() - - def g(): - with LoggingContext(parent_context=logcontext): - return f(*args, **kwargs) - - return make_deferred_yieldable(threads.deferToThreadPool(reactor, threadpool, g)) diff --git a/synapse/util/logformatter.py b/synapse/util/logformatter.py deleted file mode 100644 index fbf570c756..0000000000 --- a/synapse/util/logformatter.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2017 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import logging -import traceback - -from six import StringIO - - -class LogFormatter(logging.Formatter): - """Log formatter which gives more detail for exceptions - - This is the same as the standard log formatter, except that when logging - exceptions [typically via log.foo("msg", exc_info=1)], it prints the - sequence that led up to the point at which the exception was caught. - (Normally only stack frames between the point the exception was raised and - where it was caught are logged). - """ - - def __init__(self, *args, **kwargs): - super(LogFormatter, self).__init__(*args, **kwargs) - - def formatException(self, ei): - sio = StringIO() - (typ, val, tb) = ei - - # log the stack above the exception capture point if possible, but - # check that we actually have an f_back attribute to work around - # https://twistedmatrix.com/trac/ticket/9305 - - if tb and hasattr(tb.tb_frame, "f_back"): - sio.write("Capture point (most recent call last):\n") - traceback.print_stack(tb.tb_frame.f_back, None, sio) - - traceback.print_exception(typ, val, tb, None, sio) - s = sio.getvalue() - sio.close() - if s[-1:] == "\n": - s = s[:-1] - return s diff --git a/synapse/util/logutils.py b/synapse/util/logutils.py deleted file mode 100644 index 7df0fa6087..0000000000 --- a/synapse/util/logutils.py +++ /dev/null @@ -1,194 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import inspect -import logging -import time -from functools import wraps -from inspect import getcallargs - -from six import PY3 - -_TIME_FUNC_ID = 0 - - -def _log_debug_as_f(f, msg, msg_args): - name = f.__module__ - logger = logging.getLogger(name) - - if logger.isEnabledFor(logging.DEBUG): - if PY3: - lineno = f.__code__.co_firstlineno - pathname = f.__code__.co_filename - else: - lineno = f.func_code.co_firstlineno - pathname = f.func_code.co_filename - - record = logging.LogRecord( - name=name, - level=logging.DEBUG, - pathname=pathname, - lineno=lineno, - msg=msg, - args=msg_args, - exc_info=None, - ) - - logger.handle(record) - - -def log_function(f): - """ Function decorator that logs every call to that function. - """ - func_name = f.__name__ - - @wraps(f) - def wrapped(*args, **kwargs): - name = f.__module__ - logger = logging.getLogger(name) - level = logging.DEBUG - - if logger.isEnabledFor(level): - bound_args = getcallargs(f, *args, **kwargs) - - def format(value): - r = str(value) - if len(r) > 50: - r = r[:50] + "..." 
- return r - - func_args = ["%s=%s" % (k, format(v)) for k, v in bound_args.items()] - - msg_args = {"func_name": func_name, "args": ", ".join(func_args)} - - _log_debug_as_f(f, "Invoked '%(func_name)s' with args: %(args)s", msg_args) - - return f(*args, **kwargs) - - wrapped.__name__ = func_name - return wrapped - - -def time_function(f): - func_name = f.__name__ - - @wraps(f) - def wrapped(*args, **kwargs): - global _TIME_FUNC_ID - id = _TIME_FUNC_ID - _TIME_FUNC_ID += 1 - - start = time.clock() - - try: - _log_debug_as_f(f, "[FUNC START] {%s-%d}", (func_name, id)) - - r = f(*args, **kwargs) - finally: - end = time.clock() - _log_debug_as_f( - f, "[FUNC END] {%s-%d} %.3f sec", (func_name, id, end - start) - ) - - return r - - return wrapped - - -def trace_function(f): - func_name = f.__name__ - linenum = f.func_code.co_firstlineno - pathname = f.func_code.co_filename - - @wraps(f) - def wrapped(*args, **kwargs): - name = f.__module__ - logger = logging.getLogger(name) - level = logging.DEBUG - - s = inspect.currentframe().f_back - - to_print = [ - "\t%s:%s %s. Args: args=%s, kwargs=%s" - % (pathname, linenum, func_name, args, kwargs) - ] - while s: - if True or s.f_globals["__name__"].startswith("synapse"): - filename, lineno, function, _, _ = inspect.getframeinfo(s) - args_string = inspect.formatargvalues(*inspect.getargvalues(s)) - - to_print.append( - "\t%s:%d %s. Args: %s" % (filename, lineno, function, args_string) - ) - - s = s.f_back - - msg = "\nTraceback for %s:\n" % (func_name,) + "\n".join(to_print) - - record = logging.LogRecord( - name=name, - level=level, - pathname=pathname, - lineno=lineno, - msg=msg, - args=None, - exc_info=None, - ) - - logger.handle(record) - - return f(*args, **kwargs) - - wrapped.__name__ = func_name - return wrapped - - -def get_previous_frames(): - s = inspect.currentframe().f_back.f_back - to_return = [] - while s: - if s.f_globals["__name__"].startswith("synapse"): - filename, lineno, function, _, _ = inspect.getframeinfo(s) - args_string = inspect.formatargvalues(*inspect.getargvalues(s)) - - to_return.append( - "{{ %s:%d %s - Args: %s }}" % (filename, lineno, function, args_string) - ) - - s = s.f_back - - return ", ".join(to_return) - - -def get_previous_frame(ignore=[]): - s = inspect.currentframe().f_back.f_back - - while s: - if s.f_globals["__name__"].startswith("synapse"): - if not any(s.f_globals["__name__"].startswith(ig) for ig in ignore): - filename, lineno, function, _, _ = inspect.getframeinfo(s) - args_string = inspect.formatargvalues(*inspect.getargvalues(s)) - - return "{{ %s:%d %s - Args: %s }}" % ( - filename, - lineno, - function, - args_string, - ) - - s = s.f_back - - return None diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 01284d3cf8..c30b6de19c 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -20,8 +20,8 @@ from prometheus_client import Counter from twisted.internet import defer +from synapse.logging.context import LoggingContext from synapse.metrics import InFlightGauge -from synapse.util.logcontext import LoggingContext logger = logging.getLogger(__name__) diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 06defa8199..27bceac00e 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -20,7 +20,7 @@ import logging from twisted.internet import defer from synapse.api.errors import LimitExceededError -from synapse.util.logcontext import ( +from synapse.logging.context import ( PreserveLoggingContext, 
make_deferred_yieldable, run_in_background, diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 1a77456498..d8d0ceae51 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -17,7 +17,7 @@ import random from twisted.internet import defer -import synapse.util.logcontext +import synapse.logging.context from synapse.api.errors import CodeMessageException logger = logging.getLogger(__name__) @@ -225,4 +225,4 @@ class RetryDestinationLimiter(object): logger.exception("Failed to store destination_retry_timings") # we deliberately do this in the background. - synapse.util.logcontext.run_in_background(store_retry_timings) + synapse.logging.context.run_in_background(store_retry_timings) diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index db9f86bdac..04b8c2c07c 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -22,7 +22,7 @@ from synapse.appservice.scheduler import ( _ServiceQueuer, _TransactionController, ) -from synapse.util.logcontext import make_deferred_yieldable +from synapse.logging.context import make_deferred_yieldable from tests import unittest diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 5a355f00cc..795703967d 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -30,9 +30,12 @@ from synapse.crypto.keyring import ( ServerKeyFetcher, StoreKeyFetcher, ) +from synapse.logging.context import ( + LoggingContext, + PreserveLoggingContext, + make_deferred_yieldable, +) from synapse.storage.keys import FetchKeyResult -from synapse.util import logcontext -from synapse.util.logcontext import LoggingContext from tests import unittest @@ -131,7 +134,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): @defer.inlineCallbacks def get_perspectives(**kwargs): self.assertEquals(LoggingContext.current_context().request, "11") - with logcontext.PreserveLoggingContext(): + with PreserveLoggingContext(): yield persp_deferred defer.returnValue(persp_resp) @@ -158,7 +161,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): self.assertFalse(res_deferreds[0].called) res_deferreds[0].addBoth(self.check_context, None) - yield logcontext.make_deferred_yieldable(res_deferreds[0]) + yield make_deferred_yieldable(res_deferreds[0]) # let verify_json_objects_for_server finish its work before we kill the # logcontext @@ -184,7 +187,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): [("server10", json1, 0, "test")] ) res_deferreds_2[0].addBoth(self.check_context, None) - yield logcontext.make_deferred_yieldable(res_deferreds_2[0]) + yield make_deferred_yieldable(res_deferreds_2[0]) # let verify_json_objects_for_server finish its work before we kill the # logcontext diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 417fda3ab2..a49f9b3224 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -36,8 +36,8 @@ from synapse.http.federation.matrix_federation_agent import ( _cache_period_from_headers, ) from synapse.http.federation.srv_resolver import Server +from synapse.logging.context import LoggingContext from synapse.util.caches.ttlcache import TTLCache -from synapse.util.logcontext import LoggingContext from tests.http import TestServerTLSConnectionFactory, get_test_ca_cert_file from tests.server import FakeTransport, ThreadedMemoryReactorClock diff --git 
a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py index cf6c6e95b5..65b51dc981 100644 --- a/tests/http/federation/test_srv_resolver.py +++ b/tests/http/federation/test_srv_resolver.py @@ -22,7 +22,7 @@ from twisted.internet.error import ConnectError from twisted.names import dns, error from synapse.http.federation.srv_resolver import SrvResolver -from synapse.util.logcontext import LoggingContext +from synapse.logging.context import LoggingContext from tests import unittest from tests.utils import MockClock diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py index c4c0d9b968..b9d6d7ad1c 100644 --- a/tests/http/test_fedclient.py +++ b/tests/http/test_fedclient.py @@ -29,7 +29,7 @@ from synapse.http.matrixfederationclient import ( MatrixFederationHttpClient, MatrixFederationRequest, ) -from synapse.util.logcontext import LoggingContext +from synapse.logging.context import LoggingContext from tests.server import FakeTransport from tests.unittest import HomeserverTestCase diff --git a/tests/patch_inline_callbacks.py b/tests/patch_inline_callbacks.py index ee0add3455..220884311c 100644 --- a/tests/patch_inline_callbacks.py +++ b/tests/patch_inline_callbacks.py @@ -28,7 +28,7 @@ def do_patch(): Patch defer.inlineCallbacks so that it checks the state of the logcontext on exit """ - from synapse.util.logcontext import LoggingContext + from synapse.logging.context import LoggingContext orig_inline_callbacks = defer.inlineCallbacks diff --git a/tests/push/test_http.py b/tests/push/test_http.py index 22c3f73ef3..8ce6bb62da 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -18,8 +18,8 @@ from mock import Mock from twisted.internet.defer import Deferred import synapse.rest.admin +from synapse.logging.context import make_deferred_yieldable from synapse.rest.client.v1 import login, room -from synapse.util.logcontext import make_deferred_yieldable from tests.unittest import HomeserverTestCase diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index 708dc26e61..a8adc9a61d 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -2,9 +2,9 @@ from mock import Mock, call from twisted.internet import defer, reactor +from synapse.logging.context import LoggingContext from synapse.rest.client.transactions import CLEANUP_PERIOD_MS, HttpTransactionCache from synapse.util import Clock -from synapse.util.logcontext import LoggingContext from tests import unittest from tests.utils import MockClock diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py index 39c9342423..bc662b61db 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py @@ -24,11 +24,11 @@ from six.moves.urllib import parse from twisted.internet.defer import Deferred +from synapse.logging.context import make_deferred_yieldable from synapse.rest.media.v1._base import FileInfo from synapse.rest.media.v1.filepath import MediaFilePaths from synapse.rest.media.v1.media_storage import MediaStorage from synapse.rest.media.v1.storage_provider import FileStorageProviderBackend -from synapse.util.logcontext import make_deferred_yieldable from tests import unittest diff --git a/tests/test_federation.py b/tests/test_federation.py index 6a8339b561..a73f18f88e 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -3,9 +3,9 @@ from mock import Mock from twisted.internet.defer import maybeDeferred, 
succeed from synapse.events import FrozenEvent +from synapse.logging.context import LoggingContext from synapse.types import Requester, UserID from synapse.util import Clock -from synapse.util.logcontext import LoggingContext from tests import unittest from tests.server import ThreadedMemoryReactorClock, setup_test_homeserver diff --git a/tests/test_server.py b/tests/test_server.py index da29ae92ce..ba08483a4b 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -26,8 +26,8 @@ from twisted.web.server import NOT_DONE_YET from synapse.api.errors import Codes, SynapseError from synapse.http.server import JsonResource from synapse.http.site import SynapseSite, logger +from synapse.logging.context import make_deferred_yieldable from synapse.util import Clock -from synapse.util.logcontext import make_deferred_yieldable from tests import unittest from tests.server import ( diff --git a/tests/test_utils/logging_setup.py b/tests/test_utils/logging_setup.py index 813f984199..2d96b0fa8d 100644 --- a/tests/test_utils/logging_setup.py +++ b/tests/test_utils/logging_setup.py @@ -17,7 +17,7 @@ import os import twisted.logger -from synapse.util.logcontext import LoggingContextFilter +from synapse.logging.context import LoggingContextFilter class ToTwistedHandler(logging.Handler): diff --git a/tests/unittest.py b/tests/unittest.py index d26804b5b5..a09e76c7c2 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -33,9 +33,9 @@ from synapse.api.constants import EventTypes from synapse.config.homeserver import HomeServerConfig from synapse.http.server import JsonResource from synapse.http.site import SynapseRequest +from synapse.logging.context import LoggingContext from synapse.server import HomeServer from synapse.types import Requester, UserID, create_requester -from synapse.util.logcontext import LoggingContext from tests.server import get_clock, make_request, render, setup_test_homeserver from tests.test_utils.logging_setup import setup_logging diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 6f8f52537c..7807328e2f 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -21,7 +21,11 @@ import mock from twisted.internet import defer, reactor from synapse.api.errors import SynapseError -from synapse.util import logcontext +from synapse.logging.context import ( + LoggingContext, + PreserveLoggingContext, + make_deferred_yieldable, +) from synapse.util.caches import descriptors from tests import unittest @@ -32,7 +36,7 @@ logger = logging.getLogger(__name__) def run_on_reactor(): d = defer.Deferred() reactor.callLater(0, d.callback, 0) - return logcontext.make_deferred_yieldable(d) + return make_deferred_yieldable(d) class CacheTestCase(unittest.TestCase): @@ -153,7 +157,7 @@ class DescriptorTestCase(unittest.TestCase): def fn(self, arg1): @defer.inlineCallbacks def inner_fn(): - with logcontext.PreserveLoggingContext(): + with PreserveLoggingContext(): yield complete_lookup defer.returnValue(1) @@ -161,10 +165,10 @@ class DescriptorTestCase(unittest.TestCase): @defer.inlineCallbacks def do_lookup(): - with logcontext.LoggingContext() as c1: + with LoggingContext() as c1: c1.name = "c1" r = yield obj.fn(1) - self.assertEqual(logcontext.LoggingContext.current_context(), c1) + self.assertEqual(LoggingContext.current_context(), c1) defer.returnValue(r) def check_result(r): @@ -174,18 +178,12 @@ class DescriptorTestCase(unittest.TestCase): # set off a deferred which will do a cache lookup d1 = do_lookup() - 
self.assertEqual( - logcontext.LoggingContext.current_context(), - logcontext.LoggingContext.sentinel, - ) + self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel) d1.addCallback(check_result) # and another d2 = do_lookup() - self.assertEqual( - logcontext.LoggingContext.current_context(), - logcontext.LoggingContext.sentinel, - ) + self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel) d2.addCallback(check_result) # let the lookup complete @@ -210,29 +208,25 @@ class DescriptorTestCase(unittest.TestCase): @defer.inlineCallbacks def do_lookup(): - with logcontext.LoggingContext() as c1: + with LoggingContext() as c1: c1.name = "c1" try: d = obj.fn(1) self.assertEqual( - logcontext.LoggingContext.current_context(), - logcontext.LoggingContext.sentinel, + LoggingContext.current_context(), LoggingContext.sentinel ) yield d self.fail("No exception thrown") except SynapseError: pass - self.assertEqual(logcontext.LoggingContext.current_context(), c1) + self.assertEqual(LoggingContext.current_context(), c1) obj = Cls() # set off a deferred which will do a cache lookup d1 = do_lookup() - self.assertEqual( - logcontext.LoggingContext.current_context(), - logcontext.LoggingContext.sentinel, - ) + self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel) return d1 @@ -288,23 +282,20 @@ class CachedListDescriptorTestCase(unittest.TestCase): @descriptors.cachedList("fn", "args1", inlineCallbacks=True) def list_fn(self, args1, arg2): - assert logcontext.LoggingContext.current_context().request == "c1" + assert LoggingContext.current_context().request == "c1" # we want this to behave like an asynchronous function yield run_on_reactor() - assert logcontext.LoggingContext.current_context().request == "c1" + assert LoggingContext.current_context().request == "c1" defer.returnValue(self.mock(args1, arg2)) - with logcontext.LoggingContext() as c1: + with LoggingContext() as c1: c1.request = "c1" obj = Cls() obj.mock.return_value = {10: "fish", 20: "chips"} d1 = obj.list_fn([10, 20], 2) - self.assertEqual( - logcontext.LoggingContext.current_context(), - logcontext.LoggingContext.sentinel, - ) + self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel) r = yield d1 - self.assertEqual(logcontext.LoggingContext.current_context(), c1) + self.assertEqual(LoggingContext.current_context(), c1) obj.mock.assert_called_once_with([10, 20], 2) self.assertEqual(r, {10: "fish", 20: "chips"}) obj.mock.reset_mock() diff --git a/tests/util/test_async_utils.py b/tests/util/test_async_utils.py index bf85d3b8ec..f60918069a 100644 --- a/tests/util/test_async_utils.py +++ b/tests/util/test_async_utils.py @@ -16,9 +16,8 @@ from twisted.internet import defer from twisted.internet.defer import CancelledError, Deferred from twisted.internet.task import Clock -from synapse.util import logcontext +from synapse.logging.context import LoggingContext, PreserveLoggingContext from synapse.util.async_helpers import timeout_deferred -from synapse.util.logcontext import LoggingContext from tests.unittest import TestCase @@ -69,14 +68,14 @@ class TimeoutDeferredTest(TestCase): @defer.inlineCallbacks def blocking(): non_completing_d = Deferred() - with logcontext.PreserveLoggingContext(): + with PreserveLoggingContext(): try: yield non_completing_d except CancelledError: blocking_was_cancelled[0] = True raise - with logcontext.LoggingContext("one") as context_one: + with LoggingContext("one") as context_one: # the errbacks should be run in the test logcontext def 
errback(res, deferred_name): self.assertIs( diff --git a/tests/util/test_linearizer.py b/tests/util/test_linearizer.py index ec7ba9719c..0ec8ef90ce 100644 --- a/tests/util/test_linearizer.py +++ b/tests/util/test_linearizer.py @@ -19,7 +19,8 @@ from six.moves import range from twisted.internet import defer, reactor from twisted.internet.defer import CancelledError -from synapse.util import Clock, logcontext +from synapse.logging.context import LoggingContext +from synapse.util import Clock from synapse.util.async_helpers import Linearizer from tests import unittest @@ -51,13 +52,13 @@ class LinearizerTestCase(unittest.TestCase): @defer.inlineCallbacks def func(i, sleep=False): - with logcontext.LoggingContext("func(%s)" % i) as lc: + with LoggingContext("func(%s)" % i) as lc: with (yield linearizer.queue("")): - self.assertEqual(logcontext.LoggingContext.current_context(), lc) + self.assertEqual(LoggingContext.current_context(), lc) if sleep: yield Clock(reactor).sleep(0) - self.assertEqual(logcontext.LoggingContext.current_context(), lc) + self.assertEqual(LoggingContext.current_context(), lc) func(0, sleep=True) for i in range(1, 100): diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py index 8d69fbf111..8b8455c8b7 100644 --- a/tests/util/test_logcontext.py +++ b/tests/util/test_logcontext.py @@ -1,8 +1,14 @@ import twisted.python.failure from twisted.internet import defer, reactor -from synapse.util import Clock, logcontext -from synapse.util.logcontext import LoggingContext +from synapse.logging.context import ( + LoggingContext, + PreserveLoggingContext, + make_deferred_yieldable, + nested_logging_context, + run_in_background, +) +from synapse.util import Clock from .. import unittest @@ -43,7 +49,7 @@ class LoggingContextTestCase(unittest.TestCase): context_one.request = "one" # fire off function, but don't wait on it. 
- d2 = logcontext.run_in_background(function) + d2 = run_in_background(function) def cb(res): callback_completed[0] = True @@ -85,7 +91,7 @@ class LoggingContextTestCase(unittest.TestCase): def test_run_in_background_with_non_blocking_fn(self): @defer.inlineCallbacks def nonblocking_function(): - with logcontext.PreserveLoggingContext(): + with PreserveLoggingContext(): yield defer.succeed(None) return self._test_run_in_background(nonblocking_function) @@ -94,7 +100,7 @@ class LoggingContextTestCase(unittest.TestCase): # a function which returns a deferred which looks like it has been # called, but is actually paused def testfunc(): - return logcontext.make_deferred_yieldable(_chained_deferred_function()) + return make_deferred_yieldable(_chained_deferred_function()) return self._test_run_in_background(testfunc) @@ -128,7 +134,7 @@ class LoggingContextTestCase(unittest.TestCase): with LoggingContext() as context_one: context_one.request = "one" - d1 = logcontext.make_deferred_yieldable(blocking_function()) + d1 = make_deferred_yieldable(blocking_function()) # make sure that the context was reset by make_deferred_yieldable self.assertIs(LoggingContext.current_context(), sentinel_context) @@ -144,7 +150,7 @@ class LoggingContextTestCase(unittest.TestCase): with LoggingContext() as context_one: context_one.request = "one" - d1 = logcontext.make_deferred_yieldable(_chained_deferred_function()) + d1 = make_deferred_yieldable(_chained_deferred_function()) # make sure that the context was reset by make_deferred_yieldable self.assertIs(LoggingContext.current_context(), sentinel_context) @@ -161,7 +167,7 @@ class LoggingContextTestCase(unittest.TestCase): with LoggingContext() as context_one: context_one.request = "one" - d1 = logcontext.make_deferred_yieldable("bum") + d1 = make_deferred_yieldable("bum") self._check_test_key("one") r = yield d1 @@ -170,7 +176,7 @@ class LoggingContextTestCase(unittest.TestCase): def test_nested_logging_context(self): with LoggingContext(request="foo"): - nested_context = logcontext.nested_logging_context(suffix="bar") + nested_context = nested_logging_context(suffix="bar") self.assertEqual(nested_context.request, "foo-bar") diff --git a/tests/util/test_logformatter.py b/tests/util/test_logformatter.py index 297aebbfbe..0fb60caacb 100644 --- a/tests/util/test_logformatter.py +++ b/tests/util/test_logformatter.py @@ -14,7 +14,7 @@ # limitations under the License. import sys -from synapse.util.logformatter import LogFormatter +from synapse.logging.formatter import LogFormatter from tests import unittest diff --git a/tests/utils.py b/tests/utils.py index da43166f3a..d8e55b0801 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -34,6 +34,7 @@ from synapse.config.homeserver import HomeServerConfig from synapse.config.server import DEFAULT_ROOM_VERSION from synapse.federation.transport import server as federation_server from synapse.http.server import HttpServer +from synapse.logging.context import LoggingContext from synapse.server import HomeServer from synapse.storage import DataStore from synapse.storage.engines import PostgresEngine, create_engine @@ -42,7 +43,6 @@ from synapse.storage.prepare_database import ( _setup_new_database, prepare_database, ) -from synapse.util.logcontext import LoggingContext from synapse.util.ratelimitutils import FederationRateLimiter # set this to True to run the tests against postgres instead of sqlite. 
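Every import-site change in this commit is mechanical: `synapse.util.logcontext` becomes `synapse.logging.context`, with names and semantics unchanged. As a minimal sketch of the calling pattern the moved docstrings describe (assuming a running Twisted reactor, with `update_caches` and `fetch_thing` as hypothetical stand-ins for real storage calls), post-move code looks like this:

    from twisted.internet import defer

    from synapse.logging.context import (
        LoggingContext,
        make_deferred_yieldable,
        run_in_background,
    )

    def update_caches():
        # hypothetical maintenance task we deliberately do not wait for
        return defer.succeed(None)

    def fetch_thing():
        # hypothetical storage call returning a Deferred
        return defer.succeed(42)

    @defer.inlineCallbacks
    def handle_request():
        with LoggingContext("handle_request") as context:
            context.request = "GET-1234"

            # fire-and-forget: run_in_background restores our logcontext when
            # update_caches yields control, and resets to the sentinel once
            # its deferred eventually completes
            run_in_background(update_caches)

            # awaited call: make_deferred_yieldable resets to the sentinel
            # while we are suspended, then restores this logcontext before
            # our callbacks run
            result = yield make_deferred_yieldable(fetch_thing())
            defer.returnValue(result)

Calling `update_caches()` bare would instead leak this request's logcontext into the reactor, which is exactly the failure mode `run_in_background` and `make_deferred_yieldable` exist to prevent. (The closing `defer.returnValue(result)` is era-appropriate here; the next commit in this series rewrites such calls as plain `return` statements.)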
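The same applies to `LogFormatter`, which this commit moves to `synapse.logging.formatter`: it is a standard `logging.Formatter` whose only override is `formatException`, so installing it is a single `setFormatter` call. A minimal sketch, assuming the post-move import path:

    import logging
    import sys

    from synapse.logging.formatter import LogFormatter

    handler = logging.StreamHandler(sys.stderr)
    # output is identical to logging.Formatter, except that logged exceptions
    # also include the "capture point" stack above where they were caught
    handler.setFormatter(LogFormatter("%(asctime)s - %(levelname)s - %(message)s"))
    logging.getLogger().addHandler(handler)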
-- cgit 1.5.1 From 4806651744616bf48abf408034ab9560e33f60ce Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 23 Jul 2019 23:00:55 +1000 Subject: Replace returnValue with return (#5736) --- changelog.d/5736.misc | 1 + docs/log_contexts.rst | 2 +- synapse/api/auth.py | 44 ++++---- synapse/api/filtering.py | 2 +- synapse/app/frontend_proxy.py | 8 +- synapse/app/homeserver.py | 2 +- synapse/appservice/__init__.py | 28 +++--- synapse/appservice/api.py | 38 +++---- synapse/appservice/scheduler.py | 4 +- synapse/crypto/keyring.py | 14 +-- synapse/events/builder.py | 16 ++- synapse/events/snapshot.py | 28 +++--- synapse/events/third_party_rules.py | 8 +- synapse/events/utils.py | 4 +- synapse/federation/federation_base.py | 6 +- synapse/federation/federation_client.py | 46 ++++----- synapse/federation/federation_server.py | 77 ++++++-------- synapse/federation/sender/per_destination_queue.py | 4 +- synapse/federation/sender/transaction_manager.py | 2 +- synapse/federation/transport/client.py | 30 +++--- synapse/groups/attestations.py | 2 +- synapse/groups/groups_server.py | 92 ++++++++--------- synapse/handlers/account_data.py | 4 +- synapse/handlers/account_validity.py | 6 +- synapse/handlers/acme.py | 2 +- synapse/handlers/admin.py | 10 +- synapse/handlers/appservice.py | 22 ++-- synapse/handlers/auth.py | 44 ++++---- synapse/handlers/deactivate_account.py | 2 +- synapse/handlers/device.py | 22 ++-- synapse/handlers/directory.py | 14 +-- synapse/handlers/e2e_keys.py | 10 +- synapse/handlers/e2e_room_keys.py | 8 +- synapse/handlers/events.py | 6 +- synapse/handlers/federation.py | 82 ++++++++------- synapse/handlers/groups_local.py | 26 ++--- synapse/handlers/identity.py | 18 ++-- synapse/handlers/initial_sync.py | 54 +++++----- synapse/handlers/message.py | 32 +++--- synapse/handlers/pagination.py | 14 ++- synapse/handlers/presence.py | 54 +++++----- synapse/handlers/profile.py | 18 ++-- synapse/handlers/receipts.py | 14 +-- synapse/handlers/register.py | 16 +-- synapse/handlers/room.py | 16 +-- synapse/handlers/room_list.py | 10 +- synapse/handlers/room_member.py | 42 ++++---- synapse/handlers/room_member_worker.py | 2 +- synapse/handlers/search.py | 14 ++- synapse/handlers/state_deltas.py | 8 +- synapse/handlers/stats.py | 6 +- synapse/handlers/sync.py | 112 ++++++++++----------- synapse/handlers/typing.py | 4 +- synapse/handlers/user_directory.py | 2 +- synapse/http/client.py | 28 +++--- synapse/http/federation/matrix_federation_agent.py | 46 ++++----- synapse/http/federation/srv_resolver.py | 8 +- synapse/http/matrixfederationclient.py | 16 +-- synapse/logging/opentracing.py | 6 +- synapse/module_api/__init__.py | 2 +- synapse/notifier.py | 18 ++-- synapse/push/bulk_push_rule_evaluator.py | 10 +- synapse/push/httppusher.py | 18 ++-- synapse/push/mailer.py | 84 +++++++--------- synapse/push/presentable_names.py | 25 +++-- synapse/push/push_tools.py | 4 +- synapse/push/pusherpool.py | 6 +- synapse/replication/http/_base.py | 2 +- synapse/replication/http/federation.py | 10 +- synapse/replication/http/login.py | 2 +- synapse/replication/http/membership.py | 4 +- synapse/replication/http/register.py | 4 +- synapse/replication/http/send_event.py | 4 +- synapse/replication/tcp/streams/_base.py | 12 +-- synapse/replication/tcp/streams/events.py | 2 +- synapse/rest/admin/__init__.py | 48 +++++---- synapse/rest/admin/server_notice_servlet.py | 2 +- synapse/rest/client/v1/directory.py | 18 ++-- synapse/rest/client/v1/events.py | 6 +- synapse/rest/client/v1/initial_sync.py | 2 +- 
synapse/rest/client/v1/login.py | 14 +-- synapse/rest/client/v1/logout.py | 4 +- synapse/rest/client/v1/presence.py | 4 +- synapse/rest/client/v1/profile.py | 14 +-- synapse/rest/client/v1/push_rule.py | 10 +- synapse/rest/client/v1/pusher.py | 8 +- synapse/rest/client/v1/room.py | 46 ++++----- synapse/rest/client/v1/voip.py | 22 ++-- synapse/rest/client/v2_alpha/account.py | 32 +++--- synapse/rest/client/v2_alpha/account_data.py | 8 +- synapse/rest/client/v2_alpha/account_validity.py | 4 +- synapse/rest/client/v2_alpha/auth.py | 4 +- synapse/rest/client/v2_alpha/capabilities.py | 2 +- synapse/rest/client/v2_alpha/devices.py | 10 +- synapse/rest/client/v2_alpha/filter.py | 4 +- synapse/rest/client/v2_alpha/groups.py | 64 ++++++------ synapse/rest/client/v2_alpha/keys.py | 8 +- synapse/rest/client/v2_alpha/notifications.py | 4 +- synapse/rest/client/v2_alpha/openid.py | 18 ++-- synapse/rest/client/v2_alpha/read_marker.py | 2 +- synapse/rest/client/v2_alpha/receipts.py | 2 +- synapse/rest/client/v2_alpha/register.py | 38 ++++--- synapse/rest/client/v2_alpha/relations.py | 8 +- synapse/rest/client/v2_alpha/report_event.py | 2 +- synapse/rest/client/v2_alpha/room_keys.py | 14 +-- .../client/v2_alpha/room_upgrade_rest_servlet.py | 2 +- synapse/rest/client/v2_alpha/sendtodevice.py | 2 +- synapse/rest/client/v2_alpha/sync.py | 48 ++++----- synapse/rest/client/v2_alpha/tags.py | 6 +- synapse/rest/client/v2_alpha/thirdparty.py | 10 +- synapse/rest/client/v2_alpha/user_directory.py | 4 +- synapse/rest/media/v1/media_repository.py | 18 ++-- synapse/rest/media/v1/media_storage.py | 12 +-- synapse/rest/media/v1/preview_url_resource.py | 34 +++---- .../resource_limits_server_notices.py | 2 +- synapse/server_notices/server_notices_manager.py | 6 +- synapse/state/__init__.py | 38 ++++--- synapse/state/v1.py | 8 +- synapse/state/v2.py | 26 ++--- synapse/storage/__init__.py | 2 +- synapse/storage/_base.py | 14 +-- synapse/storage/account_data.py | 14 ++- synapse/storage/appservice.py | 14 ++- synapse/storage/background_updates.py | 20 ++-- synapse/storage/client_ips.py | 26 +++-- synapse/storage/deviceinbox.py | 10 +- synapse/storage/devices.py | 32 +++--- synapse/storage/directory.py | 10 +- synapse/storage/e2e_room_keys.py | 6 +- synapse/storage/end_to_end_keys.py | 8 +- synapse/storage/event_federation.py | 12 +-- synapse/storage/event_push_actions.py | 16 +-- synapse/storage/events.py | 34 +++---- synapse/storage/events_bg_updates.py | 6 +- synapse/storage/events_worker.py | 20 ++-- synapse/storage/filtering.py | 4 +- synapse/storage/group_server.py | 38 ++++--- synapse/storage/monthly_active_users.py | 2 +- synapse/storage/presence.py | 6 +- synapse/storage/profile.py | 12 +-- synapse/storage/push_rule.py | 18 ++-- synapse/storage/pusher.py | 10 +- synapse/storage/receipts.py | 36 +++---- synapse/storage/registration.py | 34 +++---- synapse/storage/relations.py | 4 +- synapse/storage/room.py | 10 +- synapse/storage/roommember.py | 42 ++++---- synapse/storage/search.py | 56 +++++------ synapse/storage/signatures.py | 2 +- synapse/storage/state.py | 54 +++++----- synapse/storage/stats.py | 16 +-- synapse/storage/stream.py | 44 ++++---- synapse/storage/tags.py | 12 +-- synapse/storage/transactions.py | 4 +- synapse/storage/user_directory.py | 30 +++--- synapse/storage/user_erasure_store.py | 5 +- synapse/streams/events.py | 4 +- synapse/util/__init__.py | 2 +- synapse/util/async_helpers.py | 4 +- synapse/util/caches/descriptors.py | 2 +- synapse/util/caches/response_cache.py | 2 +- synapse/util/metrics.py 
| 2 +- synapse/util/retryutils.py | 16 ++- synapse/visibility.py | 8 +- tests/crypto/test_keyring.py | 6 +- tests/handlers/test_register.py | 2 +- .../federation/test_matrix_federation_agent.py | 4 +- tests/http/federation/test_srv_resolver.py | 2 +- tests/http/test_fedclient.py | 2 +- tests/rest/client/test_transactions.py | 2 +- tests/storage/test_background_update.py | 4 +- tests/storage/test_redaction.py | 4 +- tests/storage/test_roommember.py | 2 +- tests/storage/test_state.py | 2 +- tests/test_visibility.py | 6 +- tests/util/caches/test_descriptors.py | 8 +- tests/utils.py | 6 +- 177 files changed, 1360 insertions(+), 1514 deletions(-) create mode 100644 changelog.d/5736.misc mode change 100755 => 100644 synapse/app/homeserver.py (limited to 'synapse/util/async_helpers.py') diff --git a/changelog.d/5736.misc b/changelog.d/5736.misc new file mode 100644 index 0000000000..5713b8b32d --- /dev/null +++ b/changelog.d/5736.misc @@ -0,0 +1 @@ +Replace uses of returnValue with plain return, as returnValue is not needed on Python 3. diff --git a/docs/log_contexts.rst b/docs/log_contexts.rst index f5cd5de8ab..4502cd9454 100644 --- a/docs/log_contexts.rst +++ b/docs/log_contexts.rst @@ -148,7 +148,7 @@ call any other functions. d = more_stuff() result = yield d # also fine, of course - defer.returnValue(result) + return result def nonInlineCallbacksFun(): logger.debug("just a wrapper really") diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 7ce6540bdd..351790cca4 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -128,7 +128,7 @@ class Auth(object): ) self._check_joined_room(member, user_id, room_id) - defer.returnValue(member) + return member @defer.inlineCallbacks def check_user_was_in_room(self, room_id, user_id): @@ -156,13 +156,13 @@ class Auth(object): if forgot: raise AuthError(403, "User %s not in room %s" % (user_id, room_id)) - defer.returnValue(member) + return member @defer.inlineCallbacks def check_host_in_room(self, room_id, host): with Measure(self.clock, "check_host_in_room"): latest_event_ids = yield self.store.is_host_joined(room_id, host) - defer.returnValue(latest_event_ids) + return latest_event_ids def _check_joined_room(self, member, user_id, room_id): if not member or member.membership != Membership.JOIN: @@ -219,9 +219,7 @@ class Auth(object): device_id="dummy-device", # stubbed ) - defer.returnValue( - synapse.types.create_requester(user_id, app_service=app_service) - ) + return synapse.types.create_requester(user_id, app_service=app_service) user_info = yield self.get_user_by_access_token(access_token, rights) user = user_info["user"] @@ -262,10 +260,8 @@ class Auth(object): request.authenticated_entity = user.to_string() - defer.returnValue( - synapse.types.create_requester( - user, token_id, is_guest, device_id, app_service=app_service - ) + return synapse.types.create_requester( + user, token_id, is_guest, device_id, app_service=app_service ) except KeyError: raise MissingClientTokenError() @@ -276,25 +272,25 @@ class Auth(object): self.get_access_token_from_request(request) ) if app_service is None: - defer.returnValue((None, None)) + return (None, None) if app_service.ip_range_whitelist: ip_address = IPAddress(self.hs.get_ip_from_request(request)) if ip_address not in app_service.ip_range_whitelist: - defer.returnValue((None, None)) + return (None, None) if b"user_id" not in request.args: - defer.returnValue((app_service.sender, app_service)) + return (app_service.sender, app_service) user_id = request.args[b"user_id"][0].decode("utf8") if 
app_service.sender == user_id: - defer.returnValue((app_service.sender, app_service)) + return (app_service.sender, app_service) if not app_service.is_interested_in_user(user_id): raise AuthError(403, "Application service cannot masquerade as this user.") if not (yield self.store.get_user_by_id(user_id)): raise AuthError(403, "Application service has not registered this user") - defer.returnValue((user_id, app_service)) + return (user_id, app_service) @defer.inlineCallbacks def get_user_by_access_token(self, token, rights="access"): @@ -330,7 +326,7 @@ class Auth(object): msg="Access token has expired", soft_logout=True ) - defer.returnValue(r) + return r # otherwise it needs to be a valid macaroon try: @@ -378,7 +374,7 @@ class Auth(object): } else: raise RuntimeError("Unknown rights setting %s", rights) - defer.returnValue(ret) + return ret except ( _InvalidMacaroonException, pymacaroons.exceptions.MacaroonException, @@ -506,7 +502,7 @@ class Auth(object): def _look_up_user_by_access_token(self, token): ret = yield self.store.get_user_by_access_token(token) if not ret: - defer.returnValue(None) + return None # we use ret.get() below because *lots* of unit tests stub out # get_user_by_access_token in a way where it only returns a couple of @@ -518,7 +514,7 @@ class Auth(object): "device_id": ret.get("device_id"), "valid_until_ms": ret.get("valid_until_ms"), } - defer.returnValue(user_info) + return user_info def get_appservice_by_req(self, request): token = self.get_access_token_from_request(request) @@ -543,7 +539,7 @@ class Auth(object): @defer.inlineCallbacks def compute_auth_events(self, event, current_state_ids, for_verification=False): if event.type == EventTypes.Create: - defer.returnValue([]) + return [] auth_ids = [] @@ -604,7 +600,7 @@ class Auth(object): if member_event.content["membership"] == Membership.JOIN: auth_ids.append(member_event.event_id) - defer.returnValue(auth_ids) + return auth_ids @defer.inlineCallbacks def check_can_change_room_list(self, room_id, user): @@ -618,7 +614,7 @@ class Auth(object): is_admin = yield self.is_server_admin(user) if is_admin: - defer.returnValue(True) + return True user_id = user.to_string() yield self.check_joined_room(room_id, user_id) @@ -712,7 +708,7 @@ class Auth(object): # * The user is a guest user, and has joined the room # else it will throw. 
member_event = yield self.check_user_was_in_room(room_id, user_id) - defer.returnValue((member_event.membership, member_event.event_id)) + return (member_event.membership, member_event.event_id) except AuthError: visibility = yield self.state.get_current_state( room_id, EventTypes.RoomHistoryVisibility, "" @@ -721,7 +717,7 @@ class Auth(object): visibility and visibility.content["history_visibility"] == "world_readable" ): - defer.returnValue((Membership.JOIN, None)) + return (Membership.JOIN, None) return raise AuthError( 403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 9b3daca29b..9f06556bd2 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -132,7 +132,7 @@ class Filtering(object): @defer.inlineCallbacks def get_user_filter(self, user_localpart, filter_id): result = yield self.store.get_user_filter(user_localpart, filter_id) - defer.returnValue(FilterCollection(result)) + return FilterCollection(result) def add_user_filter(self, user_localpart, user_filter): self.check_valid_filter(user_filter) diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 5b563c2778..e2822ca848 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -70,12 +70,12 @@ class PresenceStatusStubServlet(RestServlet): except HttpResponseException as e: raise e.to_synapse_error() - defer.returnValue((200, result)) + return (200, result) @defer.inlineCallbacks def on_PUT(self, request, user_id): yield self.auth.get_user_by_req(request) - defer.returnValue((200, {})) + return (200, {}) class KeyUploadServlet(RestServlet): @@ -126,11 +126,11 @@ class KeyUploadServlet(RestServlet): self.main_uri + request.uri.decode("ascii"), body, headers=headers ) - defer.returnValue((200, result)) + return (200, result) else: # Just interested in counts. 
result = yield self.store.count_e2e_one_time_keys(user_id, device_id) - defer.returnValue((200, {"one_time_key_counts": result})) + return (200, {"one_time_key_counts": result}) class FrontendProxySlavedStore( diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py old mode 100755 new mode 100644 index 34c3f5ee99..7d6b51b5bc --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -406,7 +406,7 @@ def setup(config_options): if provision: yield acme.provision_certificate() - defer.returnValue(provision) + return provision @defer.inlineCallbacks def reprovision_acme(): diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index b26a31dd54..33b3579425 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -175,21 +175,21 @@ class ApplicationService(object): @defer.inlineCallbacks def _matches_user(self, event, store): if not event: - defer.returnValue(False) + return False if self.is_interested_in_user(event.sender): - defer.returnValue(True) + return True # also check m.room.member state key if event.type == EventTypes.Member and self.is_interested_in_user( event.state_key ): - defer.returnValue(True) + return True if not store: - defer.returnValue(False) + return False does_match = yield self._matches_user_in_member_list(event.room_id, store) - defer.returnValue(does_match) + return does_match @cachedInlineCallbacks(num_args=1, cache_context=True) def _matches_user_in_member_list(self, room_id, store, cache_context): @@ -200,8 +200,8 @@ class ApplicationService(object): # check joined member events for user_id in member_list: if self.is_interested_in_user(user_id): - defer.returnValue(True) - defer.returnValue(False) + return True + return False def _matches_room_id(self, event): if hasattr(event, "room_id"): @@ -211,13 +211,13 @@ class ApplicationService(object): @defer.inlineCallbacks def _matches_aliases(self, event, store): if not store or not event: - defer.returnValue(False) + return False alias_list = yield store.get_aliases_for_room(event.room_id) for alias in alias_list: if self.is_interested_in_alias(alias): - defer.returnValue(True) - defer.returnValue(False) + return True + return False @defer.inlineCallbacks def is_interested(self, event, store=None): @@ -231,15 +231,15 @@ class ApplicationService(object): """ # Do cheap checks first if self._matches_room_id(event): - defer.returnValue(True) + return True if (yield self._matches_aliases(event, store)): - defer.returnValue(True) + return True if (yield self._matches_user(event, store)): - defer.returnValue(True) + return True - defer.returnValue(False) + return False def is_interested_in_user(self, user_id): return ( diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 571881775b..007ca75a94 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -97,40 +97,40 @@ class ApplicationServiceApi(SimpleHttpClient): @defer.inlineCallbacks def query_user(self, service, user_id): if service.url is None: - defer.returnValue(False) + return False uri = service.url + ("/users/%s" % urllib.parse.quote(user_id)) response = None try: response = yield self.get_json(uri, {"access_token": service.hs_token}) if response is not None: # just an empty json object - defer.returnValue(True) + return True except CodeMessageException as e: if e.code == 404: - defer.returnValue(False) + return False return logger.warning("query_user to %s received %s", uri, e.code) except Exception as ex: logger.warning("query_user to %s threw exception %s", uri, 
ex) - defer.returnValue(False) + return False @defer.inlineCallbacks def query_alias(self, service, alias): if service.url is None: - defer.returnValue(False) + return False uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias)) response = None try: response = yield self.get_json(uri, {"access_token": service.hs_token}) if response is not None: # just an empty json object - defer.returnValue(True) + return True except CodeMessageException as e: logger.warning("query_alias to %s received %s", uri, e.code) if e.code == 404: - defer.returnValue(False) + return False return except Exception as ex: logger.warning("query_alias to %s threw exception %s", uri, ex) - defer.returnValue(False) + return False @defer.inlineCallbacks def query_3pe(self, service, kind, protocol, fields): @@ -141,7 +141,7 @@ class ApplicationServiceApi(SimpleHttpClient): else: raise ValueError("Unrecognised 'kind' argument %r to query_3pe()", kind) if service.url is None: - defer.returnValue([]) + return [] uri = "%s%s/thirdparty/%s/%s" % ( service.url, @@ -155,7 +155,7 @@ class ApplicationServiceApi(SimpleHttpClient): logger.warning( "query_3pe to %s returned an invalid response %r", uri, response ) - defer.returnValue([]) + return [] ret = [] for r in response: @@ -166,14 +166,14 @@ class ApplicationServiceApi(SimpleHttpClient): "query_3pe to %s returned an invalid result %r", uri, r ) - defer.returnValue(ret) + return ret except Exception as ex: logger.warning("query_3pe to %s threw exception %s", uri, ex) - defer.returnValue([]) + return [] def get_3pe_protocol(self, service, protocol): if service.url is None: - defer.returnValue({}) + return {} @defer.inlineCallbacks def _get(): @@ -189,7 +189,7 @@ class ApplicationServiceApi(SimpleHttpClient): logger.warning( "query_3pe_protocol to %s did not return a" " valid result", uri ) - defer.returnValue(None) + return None for instance in info.get("instances", []): network_id = instance.get("network_id", None) @@ -198,10 +198,10 @@ class ApplicationServiceApi(SimpleHttpClient): service.id, network_id ).to_string() - defer.returnValue(info) + return info except Exception as ex: logger.warning("query_3pe_protocol to %s threw exception %s", uri, ex) - defer.returnValue(None) + return None key = (service.id, protocol) return self.protocol_meta_cache.wrap(key, _get) @@ -209,7 +209,7 @@ class ApplicationServiceApi(SimpleHttpClient): @defer.inlineCallbacks def push_bulk(self, service, events, txn_id=None): if service.url is None: - defer.returnValue(True) + return True events = self._serialize(events) @@ -229,14 +229,14 @@ class ApplicationServiceApi(SimpleHttpClient): ) sent_transactions_counter.labels(service.id).inc() sent_events_counter.labels(service.id).inc(len(events)) - defer.returnValue(True) + return True return except CodeMessageException as e: logger.warning("push_bulk to %s received %s", uri, e.code) except Exception as ex: logger.warning("push_bulk to %s threw exception %s", uri, ex) failed_transactions_counter.labels(service.id).inc() - defer.returnValue(False) + return False def _serialize(self, events): time_now = self.clock.time_msec() diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index e5b36494f5..42a350bff8 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -193,7 +193,7 @@ class _TransactionController(object): @defer.inlineCallbacks def _is_service_up(self, service): state = yield self.store.get_appservice_state(service) - defer.returnValue(state == ApplicationServiceState.UP or state is 
None) + return state == ApplicationServiceState.UP or state is None class _Recoverer(object): @@ -208,7 +208,7 @@ class _Recoverer(object): r.service.id, ) r.recover() - defer.returnValue(recoverers) + return recoverers def __init__(self, clock, store, as_api, service, callback): self.clock = clock diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index e8bb420ad1..6c3e885e72 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -462,7 +462,7 @@ class StoreKeyFetcher(KeyFetcher): keys = {} for (server_name, key_id), key in res.items(): keys.setdefault(server_name, {})[key_id] = key - defer.returnValue(keys) + return keys class BaseV2KeyFetcher(object): @@ -566,7 +566,7 @@ class BaseV2KeyFetcher(object): ).addErrback(unwrapFirstError) ) - defer.returnValue(verify_keys) + return verify_keys class PerspectivesKeyFetcher(BaseV2KeyFetcher): @@ -588,7 +588,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): result = yield self.get_server_verify_key_v2_indirect( keys_to_fetch, key_server ) - defer.returnValue(result) + return result except KeyLookupError as e: logger.warning( "Key lookup failed from %r: %s", key_server.server_name, e @@ -601,7 +601,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): str(e), ) - defer.returnValue({}) + return {} results = yield make_deferred_yieldable( defer.gatherResults( @@ -615,7 +615,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): for server_name, keys in result.items(): union_of_keys.setdefault(server_name, {}).update(keys) - defer.returnValue(union_of_keys) + return union_of_keys @defer.inlineCallbacks def get_server_verify_key_v2_indirect(self, keys_to_fetch, key_server): @@ -701,7 +701,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): perspective_name, time_now_ms, added_keys ) - defer.returnValue(keys) + return keys def _validate_perspectives_response(self, key_server, response): """Optionally check the signature on the result of a /key/query request @@ -843,7 +843,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher): ) keys.update(response_keys) - defer.returnValue(keys) + return keys @defer.inlineCallbacks diff --git a/synapse/events/builder.py b/synapse/events/builder.py index db011e0407..3997751337 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -144,15 +144,13 @@ class EventBuilder(object): if self._origin_server_ts is not None: event_dict["origin_server_ts"] = self._origin_server_ts - defer.returnValue( - create_local_event_from_event_dict( - clock=self._clock, - hostname=self._hostname, - signing_key=self._signing_key, - format_version=self.format_version, - event_dict=event_dict, - internal_metadata_dict=self.internal_metadata.get_dict(), - ) + return create_local_event_from_event_dict( + clock=self._clock, + hostname=self._hostname, + signing_key=self._signing_key, + format_version=self.format_version, + event_dict=event_dict, + internal_metadata_dict=self.internal_metadata.get_dict(), ) diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index a9545e6c1b..acbcbeeced 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -133,19 +133,17 @@ class EventContext(object): else: prev_state_id = None - defer.returnValue( - { - "prev_state_id": prev_state_id, - "event_type": event.type, - "event_state_key": event.state_key if event.is_state() else None, - "state_group": self.state_group, - "rejected": self.rejected, - "prev_group": self.prev_group, - "delta_ids": _encode_state_dict(self.delta_ids), - "prev_state_events": self.prev_state_events, - 
"app_service_id": self.app_service.id if self.app_service else None, - } - ) + return { + "prev_state_id": prev_state_id, + "event_type": event.type, + "event_state_key": event.state_key if event.is_state() else None, + "state_group": self.state_group, + "rejected": self.rejected, + "prev_group": self.prev_group, + "delta_ids": _encode_state_dict(self.delta_ids), + "prev_state_events": self.prev_state_events, + "app_service_id": self.app_service.id if self.app_service else None, + } @staticmethod def deserialize(store, input): @@ -202,7 +200,7 @@ class EventContext(object): yield make_deferred_yieldable(self._fetching_state_deferred) - defer.returnValue(self._current_state_ids) + return self._current_state_ids @defer.inlineCallbacks def get_prev_state_ids(self, store): @@ -222,7 +220,7 @@ class EventContext(object): yield make_deferred_yieldable(self._fetching_state_deferred) - defer.returnValue(self._prev_state_ids) + return self._prev_state_ids def get_cached_current_state_ids(self): """Gets the current state IDs if we have them already cached. diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index 8f5d95696b..714a9b1579 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -51,7 +51,7 @@ class ThirdPartyEventRules(object): defer.Deferred[bool]: True if the event should be allowed, False if not. """ if self.third_party_rules is None: - defer.returnValue(True) + return True prev_state_ids = yield context.get_prev_state_ids(self.store) @@ -61,7 +61,7 @@ class ThirdPartyEventRules(object): state_events[key] = yield self.store.get_event(event_id, allow_none=True) ret = yield self.third_party_rules.check_event_allowed(event, state_events) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def on_create_room(self, requester, config, is_requester_admin): @@ -98,7 +98,7 @@ class ThirdPartyEventRules(object): """ if self.third_party_rules is None: - defer.returnValue(True) + return True state_ids = yield self.store.get_filtered_current_state_ids(room_id) room_state_events = yield self.store.get_events(state_ids.values()) @@ -110,4 +110,4 @@ class ThirdPartyEventRules(object): ret = yield self.third_party_rules.check_threepid_can_be_invited( medium, address, state_events ) - defer.returnValue(ret) + return ret diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 9487a886f5..07d1c5bcf0 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -360,7 +360,7 @@ class EventClientSerializer(object): """ # To handle the case of presence events and the like if not isinstance(event, EventBase): - defer.returnValue(event) + return event event_id = event.event_id serialized_event = serialize_event(event, time_now, **kwargs) @@ -406,7 +406,7 @@ class EventClientSerializer(object): "sender": edit.sender, } - defer.returnValue(serialized_event) + return serialized_event def serialize_events(self, events, time_now, **kwargs): """Serializes multiple events. 
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index f7bb806ae7..5a1e23a145 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -106,7 +106,7 @@ class FederationBase(object): "Failed to find copy of %s with valid signature", pdu.event_id ) - defer.returnValue(res) + return res handle = preserve_fn(handle_check_result) deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)] @@ -116,9 +116,9 @@ class FederationBase(object): ).addErrback(unwrapFirstError) if include_none: - defer.returnValue(valid_pdus) + return valid_pdus else: - defer.returnValue([p for p in valid_pdus if p]) + return [p for p in valid_pdus if p] def _check_sigs_and_hash(self, room_version, pdu): return make_deferred_yieldable( diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 3cb4b94420..25ed1257f1 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -213,7 +213,7 @@ class FederationClient(FederationBase): ).addErrback(unwrapFirstError) ) - defer.returnValue(pdus) + return pdus @defer.inlineCallbacks @log_function @@ -245,7 +245,7 @@ class FederationClient(FederationBase): ev = self._get_pdu_cache.get(event_id) if ev: - defer.returnValue(ev) + return ev pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {}) @@ -307,7 +307,7 @@ class FederationClient(FederationBase): if signed_pdu: self._get_pdu_cache[event_id] = signed_pdu - defer.returnValue(signed_pdu) + return signed_pdu @defer.inlineCallbacks @log_function @@ -355,7 +355,7 @@ class FederationClient(FederationBase): auth_chain.sort(key=lambda e: e.depth) - defer.returnValue((pdus, auth_chain)) + return (pdus, auth_chain) except HttpResponseException as e: if e.code == 400 or e.code == 404: logger.info("Failed to use get_room_state_ids API, falling back") @@ -404,7 +404,7 @@ class FederationClient(FederationBase): signed_auth.sort(key=lambda e: e.depth) - defer.returnValue((signed_pdus, signed_auth)) + return (signed_pdus, signed_auth) @defer.inlineCallbacks def get_events_from_store_or_dest(self, destination, room_id, event_ids): @@ -429,7 +429,7 @@ class FederationClient(FederationBase): missing_events.discard(k) if not missing_events: - defer.returnValue((signed_events, failed_to_fetch)) + return (signed_events, failed_to_fetch) logger.debug( "Fetching unknown state/auth events %s for room %s", @@ -465,7 +465,7 @@ class FederationClient(FederationBase): # We removed all events we successfully fetched from `batch` failed_to_fetch.update(batch) - defer.returnValue((signed_events, failed_to_fetch)) + return (signed_events, failed_to_fetch) @defer.inlineCallbacks @log_function @@ -485,7 +485,7 @@ class FederationClient(FederationBase): signed_auth.sort(key=lambda e: e.depth) - defer.returnValue(signed_auth) + return signed_auth @defer.inlineCallbacks def _try_destination_list(self, description, destinations, callback): @@ -521,7 +521,7 @@ class FederationClient(FederationBase): try: res = yield callback(destination) - defer.returnValue(res) + return res except InvalidResponseError as e: logger.warn("Failed to %s via %s: %s", description, destination, e) except HttpResponseException as e: @@ -615,7 +615,7 @@ class FederationClient(FederationBase): event_dict=pdu_dict, ) - defer.returnValue((destination, ev, event_format)) + return (destination, ev, event_format) return self._try_destination_list( "make_" + membership, destinations, send_request @@ 
-728,13 +728,11 @@ class FederationClient(FederationBase): check_authchain_validity(signed_auth) - defer.returnValue( - { - "state": signed_state, - "auth_chain": signed_auth, - "origin": destination, - } - ) + return { + "state": signed_state, + "auth_chain": signed_auth, + "origin": destination, + } return self._try_destination_list("send_join", destinations, send_request) @@ -758,7 +756,7 @@ class FederationClient(FederationBase): # FIXME: We should handle signature failures more gracefully. - defer.returnValue(pdu) + return pdu @defer.inlineCallbacks def _do_send_invite(self, destination, pdu, room_version): @@ -786,7 +784,7 @@ class FederationClient(FederationBase): "invite_room_state": pdu.unsigned.get("invite_room_state", []), }, ) - defer.returnValue(content) + return content except HttpResponseException as e: if e.code in [400, 404]: err = e.to_synapse_error() @@ -821,7 +819,7 @@ class FederationClient(FederationBase): event_id=pdu.event_id, content=pdu.get_pdu_json(time_now), ) - defer.returnValue(content) + return content def send_leave(self, destinations, pdu): """Sends a leave event to one of a list of homeservers. @@ -856,7 +854,7 @@ class FederationClient(FederationBase): ) logger.debug("Got content: %s", content) - defer.returnValue(None) + return None return self._try_destination_list("send_leave", destinations, send_request) @@ -917,7 +915,7 @@ class FederationClient(FederationBase): "missing": content.get("missing", []), } - defer.returnValue(ret) + return ret @defer.inlineCallbacks def get_missing_events( @@ -974,7 +972,7 @@ class FederationClient(FederationBase): # get_missing_events signed_events = [] - defer.returnValue(signed_events) + return signed_events @defer.inlineCallbacks def forward_third_party_invite(self, destinations, room_id, event_dict): @@ -986,7 +984,7 @@ class FederationClient(FederationBase): yield self.transport_layer.exchange_third_party_invite( destination=destination, room_id=room_id, event_dict=event_dict ) - defer.returnValue(None) + return None except CodeMessageException: raise except Exception as e: diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 8c0a18b120..b4b9a05ca6 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -99,7 +99,7 @@ class FederationServer(FederationBase): res = self._transaction_from_pdus(pdus).get_dict() - defer.returnValue((200, res)) + return (200, res) @defer.inlineCallbacks @log_function @@ -126,7 +126,7 @@ class FederationServer(FederationBase): origin, transaction, request_time ) - defer.returnValue(result) + return result @defer.inlineCallbacks def _handle_incoming_transaction(self, origin, transaction, request_time): @@ -147,8 +147,7 @@ class FederationServer(FederationBase): "[%s] We've already responded to this request", transaction.transaction_id, ) - defer.returnValue(response) - return + return response logger.debug("[%s] Transaction is new", transaction.transaction_id) @@ -163,7 +162,7 @@ class FederationServer(FederationBase): yield self.transaction_actions.set_response( origin, transaction, 400, response ) - defer.returnValue((400, response)) + return (400, response) received_pdus_counter.inc(len(transaction.pdus)) @@ -265,7 +264,7 @@ class FederationServer(FederationBase): logger.debug("Returning: %s", str(response)) yield self.transaction_actions.set_response(origin, transaction, 200, response) - defer.returnValue((200, response)) + return (200, response) @defer.inlineCallbacks def received_edu(self, 
origin, edu_type, content): @@ -298,7 +297,7 @@ class FederationServer(FederationBase): event_id, ) - defer.returnValue((200, resp)) + return (200, resp) @defer.inlineCallbacks def on_state_ids_request(self, origin, room_id, event_id): @@ -315,9 +314,7 @@ class FederationServer(FederationBase): state_ids = yield self.handler.get_state_ids_for_pdu(room_id, event_id) auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids) - defer.returnValue( - (200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}) - ) + return (200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}) @defer.inlineCallbacks def _on_context_state_request_compute(self, room_id, event_id): @@ -336,12 +333,10 @@ class FederationServer(FederationBase): ) ) - defer.returnValue( - { - "pdus": [pdu.get_pdu_json() for pdu in pdus], - "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain], - } - ) + return { + "pdus": [pdu.get_pdu_json() for pdu in pdus], + "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain], + } @defer.inlineCallbacks @log_function @@ -349,15 +344,15 @@ class FederationServer(FederationBase): pdu = yield self.handler.get_persisted_pdu(origin, event_id) if pdu: - defer.returnValue((200, self._transaction_from_pdus([pdu]).get_dict())) + return (200, self._transaction_from_pdus([pdu]).get_dict()) else: - defer.returnValue((404, "")) + return (404, "") @defer.inlineCallbacks def on_query_request(self, query_type, args): received_queries_counter.labels(query_type).inc() resp = yield self.registry.on_query(query_type, args) - defer.returnValue((200, resp)) + return (200, resp) @defer.inlineCallbacks def on_make_join_request(self, origin, room_id, user_id, supported_versions): @@ -371,9 +366,7 @@ class FederationServer(FederationBase): pdu = yield self.handler.on_make_join_request(room_id, user_id) time_now = self._clock.time_msec() - defer.returnValue( - {"event": pdu.get_pdu_json(time_now), "room_version": room_version} - ) + return {"event": pdu.get_pdu_json(time_now), "room_version": room_version} @defer.inlineCallbacks def on_invite_request(self, origin, content, room_version): @@ -391,7 +384,7 @@ class FederationServer(FederationBase): yield self.check_server_matches_acl(origin_host, pdu.room_id) ret_pdu = yield self.handler.on_invite_request(origin, pdu) time_now = self._clock.time_msec() - defer.returnValue({"event": ret_pdu.get_pdu_json(time_now)}) + return {"event": ret_pdu.get_pdu_json(time_now)} @defer.inlineCallbacks def on_send_join_request(self, origin, content, room_id): @@ -407,16 +400,14 @@ class FederationServer(FederationBase): logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures) res_pdus = yield self.handler.on_send_join_request(origin, pdu) time_now = self._clock.time_msec() - defer.returnValue( - ( - 200, - { - "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]], - "auth_chain": [ - p.get_pdu_json(time_now) for p in res_pdus["auth_chain"] - ], - }, - ) + return ( + 200, + { + "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]], + "auth_chain": [ + p.get_pdu_json(time_now) for p in res_pdus["auth_chain"] + ], + }, ) @defer.inlineCallbacks @@ -428,9 +419,7 @@ class FederationServer(FederationBase): room_version = yield self.store.get_room_version(room_id) time_now = self._clock.time_msec() - defer.returnValue( - {"event": pdu.get_pdu_json(time_now), "room_version": room_version} - ) + return {"event": pdu.get_pdu_json(time_now), "room_version": room_version} @defer.inlineCallbacks def on_send_leave_request(self, origin, content, room_id): 
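Two details of the federation_server hunks are worth noting. These handlers resolve to a (code, content) tuple rather than a bare dict, and where the old code paired defer.returnValue(response) with a bare `return`, that trailing `return` becomes redundant once exit and value are folded into a single `return response` statement. A condensed sketch with illustrative names, not Synapse's actual wiring:

from twisted.internet import defer

@defer.inlineCallbacks
def handle_transaction(cached):
    # Early exit for a cached response; the old spelling used two lines here:
    #     defer.returnValue(cached)
    #     return
    if cached is not None:
        return cached
    response = yield defer.succeed({"pdus": {}})
    return (200, response)  # (HTTP status, JSON-serialisable body)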
@@ -445,7 +434,7 @@ class FederationServer(FederationBase): logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures) yield self.handler.on_send_leave_request(origin, pdu) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_event_auth(self, origin, room_id, event_id): @@ -456,7 +445,7 @@ class FederationServer(FederationBase): time_now = self._clock.time_msec() auth_pdus = yield self.handler.on_event_auth(event_id) res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]} - defer.returnValue((200, res)) + return (200, res) @defer.inlineCallbacks def on_query_auth_request(self, origin, content, room_id, event_id): @@ -509,7 +498,7 @@ class FederationServer(FederationBase): "missing": ret.get("missing", []), } - defer.returnValue((200, send_content)) + return (200, send_content) @log_function def on_query_client_keys(self, origin, content): @@ -548,7 +537,7 @@ class FederationServer(FederationBase): ), ) - defer.returnValue({"one_time_keys": json_result}) + return {"one_time_keys": json_result} @defer.inlineCallbacks @log_function @@ -580,9 +569,7 @@ class FederationServer(FederationBase): time_now = self._clock.time_msec() - defer.returnValue( - {"events": [ev.get_pdu_json(time_now) for ev in missing_events]} - ) + return {"events": [ev.get_pdu_json(time_now) for ev in missing_events]} @log_function def on_openid_userinfo(self, token): @@ -676,14 +663,14 @@ class FederationServer(FederationBase): ret = yield self.handler.exchange_third_party_invite( sender_user_id, target_user_id, room_id, signed ) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def on_exchange_third_party_invite_request(self, origin, room_id, event_dict): ret = yield self.handler.on_exchange_third_party_invite_request( origin, room_id, event_dict ) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def check_server_matches_acl(self, server_name, room_id): diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 9aab12c0d3..fad980b893 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -374,7 +374,7 @@ class PerDestinationQueue(object): assert len(edus) <= limit, "get_devices_by_remote returned too many EDUs" - defer.returnValue((edus, now_stream_id)) + return (edus, now_stream_id) @defer.inlineCallbacks def _get_to_device_message_edus(self, limit): @@ -393,4 +393,4 @@ class PerDestinationQueue(object): for content in contents ] - defer.returnValue((edus, stream_id)) + return (edus, stream_id) diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py index 0460a8c4ac..52706302f2 100644 --- a/synapse/federation/sender/transaction_manager.py +++ b/synapse/federation/sender/transaction_manager.py @@ -133,4 +133,4 @@ class TransactionManager(object): ) success = False - defer.returnValue(success) + return success diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 1aae9ec9e7..2a6709ff48 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -183,7 +183,7 @@ class TransportLayerClient(object): try_trailing_slash_on_400=True, ) - defer.returnValue(response) + return response @defer.inlineCallbacks @log_function @@ -201,7 +201,7 @@ class TransportLayerClient(object): ignore_backoff=ignore_backoff, ) - defer.returnValue(content) + return content @defer.inlineCallbacks @log_function 
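The TransportLayerClient hunks apply the same substitution to thin wrappers that yield one HTTP call and return its body. For comparison only, and not part of this patch: the same wrapper shape as a native coroutine, which this return style is one step towards. Deferreds are awaitable, so Twisted can drive such a coroutine via defer.ensureDeferred(); `client` is an illustrative stand-in for the HTTP client object used above.

async def get_json_wrapper(client, destination, path):
    # Same shape as the @defer.inlineCallbacks wrappers above, in
    # native async/await form.
    content = await client.get_json(destination=destination, path=path)
    return content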
@@ -259,7 +259,7 @@ class TransportLayerClient(object): ignore_backoff=ignore_backoff, ) - defer.returnValue(content) + return content @defer.inlineCallbacks @log_function @@ -270,7 +270,7 @@ class TransportLayerClient(object): destination=destination, path=path, data=content ) - defer.returnValue(response) + return response @defer.inlineCallbacks @log_function @@ -288,7 +288,7 @@ class TransportLayerClient(object): ignore_backoff=True, ) - defer.returnValue(response) + return response @defer.inlineCallbacks @log_function @@ -299,7 +299,7 @@ class TransportLayerClient(object): destination=destination, path=path, data=content, ignore_backoff=True ) - defer.returnValue(response) + return response @defer.inlineCallbacks @log_function @@ -310,7 +310,7 @@ class TransportLayerClient(object): destination=destination, path=path, data=content, ignore_backoff=True ) - defer.returnValue(response) + return response @defer.inlineCallbacks @log_function @@ -339,7 +339,7 @@ class TransportLayerClient(object): destination=remote_server, path=path, args=args, ignore_backoff=True ) - defer.returnValue(response) + return response @defer.inlineCallbacks @log_function @@ -350,7 +350,7 @@ class TransportLayerClient(object): destination=destination, path=path, data=event_dict ) - defer.returnValue(response) + return response @defer.inlineCallbacks @log_function @@ -359,7 +359,7 @@ class TransportLayerClient(object): content = yield self.client.get_json(destination=destination, path=path) - defer.returnValue(content) + return content @defer.inlineCallbacks @log_function @@ -370,7 +370,7 @@ class TransportLayerClient(object): destination=destination, path=path, data=content ) - defer.returnValue(content) + return content @defer.inlineCallbacks @log_function @@ -402,7 +402,7 @@ class TransportLayerClient(object): content = yield self.client.post_json( destination=destination, path=path, data=query_content, timeout=timeout ) - defer.returnValue(content) + return content @defer.inlineCallbacks @log_function @@ -426,7 +426,7 @@ class TransportLayerClient(object): content = yield self.client.get_json( destination=destination, path=path, timeout=timeout ) - defer.returnValue(content) + return content @defer.inlineCallbacks @log_function @@ -460,7 +460,7 @@ class TransportLayerClient(object): content = yield self.client.post_json( destination=destination, path=path, data=query_content, timeout=timeout ) - defer.returnValue(content) + return content @defer.inlineCallbacks @log_function @@ -488,7 +488,7 @@ class TransportLayerClient(object): timeout=timeout, ) - defer.returnValue(content) + return content @log_function def get_group_profile(self, destination, group_id, requester_user_id): diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index f497711133..dfd7ae041b 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -157,7 +157,7 @@ class GroupAttestionRenewer(object): yield self.store.update_remote_attestion(group_id, user_id, attestation) - defer.returnValue({}) + return {} def _start_renew_attestations(self): return run_as_background_process("renew_attestations", self._renew_attestations) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 168c9e3f84..d50e691436 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -85,7 +85,7 @@ class GroupsServerHandler(object): if not is_admin: raise SynapseError(403, "User is not admin in group") - defer.returnValue(group) + return group @defer.inlineCallbacks 
def get_group_summary(self, group_id, requester_user_id): @@ -151,22 +151,20 @@ class GroupsServerHandler(object): group_id, requester_user_id ) - defer.returnValue( - { - "profile": profile, - "users_section": { - "users": users, - "roles": roles, - "total_user_count_estimate": 0, # TODO - }, - "rooms_section": { - "rooms": rooms, - "categories": categories, - "total_room_count_estimate": 0, # TODO - }, - "user": membership_info, - } - ) + return { + "profile": profile, + "users_section": { + "users": users, + "roles": roles, + "total_user_count_estimate": 0, # TODO + }, + "rooms_section": { + "rooms": rooms, + "categories": categories, + "total_room_count_estimate": 0, # TODO + }, + "user": membership_info, + } @defer.inlineCallbacks def update_group_summary_room( @@ -192,7 +190,7 @@ class GroupsServerHandler(object): is_public=is_public, ) - defer.returnValue({}) + return {} @defer.inlineCallbacks def delete_group_summary_room( @@ -208,7 +206,7 @@ class GroupsServerHandler(object): group_id=group_id, room_id=room_id, category_id=category_id ) - defer.returnValue({}) + return {} @defer.inlineCallbacks def set_group_join_policy(self, group_id, requester_user_id, content): @@ -228,7 +226,7 @@ class GroupsServerHandler(object): yield self.store.set_group_join_policy(group_id, join_policy=join_policy) - defer.returnValue({}) + return {} @defer.inlineCallbacks def get_group_categories(self, group_id, requester_user_id): @@ -237,7 +235,7 @@ class GroupsServerHandler(object): yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) categories = yield self.store.get_group_categories(group_id=group_id) - defer.returnValue({"categories": categories}) + return {"categories": categories} @defer.inlineCallbacks def get_group_category(self, group_id, requester_user_id, category_id): @@ -249,7 +247,7 @@ class GroupsServerHandler(object): group_id=group_id, category_id=category_id ) - defer.returnValue(res) + return res @defer.inlineCallbacks def update_group_category(self, group_id, requester_user_id, category_id, content): @@ -269,7 +267,7 @@ class GroupsServerHandler(object): profile=profile, ) - defer.returnValue({}) + return {} @defer.inlineCallbacks def delete_group_category(self, group_id, requester_user_id, category_id): @@ -283,7 +281,7 @@ class GroupsServerHandler(object): group_id=group_id, category_id=category_id ) - defer.returnValue({}) + return {} @defer.inlineCallbacks def get_group_roles(self, group_id, requester_user_id): @@ -292,7 +290,7 @@ class GroupsServerHandler(object): yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) roles = yield self.store.get_group_roles(group_id=group_id) - defer.returnValue({"roles": roles}) + return {"roles": roles} @defer.inlineCallbacks def get_group_role(self, group_id, requester_user_id, role_id): @@ -301,7 +299,7 @@ class GroupsServerHandler(object): yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) res = yield self.store.get_group_role(group_id=group_id, role_id=role_id) - defer.returnValue(res) + return res @defer.inlineCallbacks def update_group_role(self, group_id, requester_user_id, role_id, content): @@ -319,7 +317,7 @@ class GroupsServerHandler(object): group_id=group_id, role_id=role_id, is_public=is_public, profile=profile ) - defer.returnValue({}) + return {} @defer.inlineCallbacks def delete_group_role(self, group_id, requester_user_id, role_id): @@ -331,7 +329,7 @@ class GroupsServerHandler(object): yield self.store.remove_group_role(group_id=group_id, 
role_id=role_id) - defer.returnValue({}) + return {} @defer.inlineCallbacks def update_group_summary_user( @@ -355,7 +353,7 @@ class GroupsServerHandler(object): is_public=is_public, ) - defer.returnValue({}) + return {} @defer.inlineCallbacks def delete_group_summary_user(self, group_id, requester_user_id, user_id, role_id): @@ -369,7 +367,7 @@ class GroupsServerHandler(object): group_id=group_id, user_id=user_id, role_id=role_id ) - defer.returnValue({}) + return {} @defer.inlineCallbacks def get_group_profile(self, group_id, requester_user_id): @@ -391,7 +389,7 @@ class GroupsServerHandler(object): group_description = {key: group[key] for key in cols} group_description["is_openly_joinable"] = group["join_policy"] == "open" - defer.returnValue(group_description) + return group_description else: raise SynapseError(404, "Unknown group") @@ -461,9 +459,7 @@ class GroupsServerHandler(object): # TODO: If admin add lists of users whose attestations have timed out - defer.returnValue( - {"chunk": chunk, "total_user_count_estimate": len(user_results)} - ) + return {"chunk": chunk, "total_user_count_estimate": len(user_results)} @defer.inlineCallbacks def get_invited_users_in_group(self, group_id, requester_user_id): @@ -494,9 +490,7 @@ class GroupsServerHandler(object): logger.warn("Error getting profile for %s: %s", user_id, e) user_profiles.append(user_profile) - defer.returnValue( - {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)} - ) + return {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)} @defer.inlineCallbacks def get_rooms_in_group(self, group_id, requester_user_id): @@ -533,9 +527,7 @@ class GroupsServerHandler(object): chunk.sort(key=lambda e: -e["num_joined_members"]) - defer.returnValue( - {"chunk": chunk, "total_room_count_estimate": len(room_results)} - ) + return {"chunk": chunk, "total_room_count_estimate": len(room_results)} @defer.inlineCallbacks def add_room_to_group(self, group_id, requester_user_id, room_id, content): @@ -551,7 +543,7 @@ class GroupsServerHandler(object): yield self.store.add_room_to_group(group_id, room_id, is_public=is_public) - defer.returnValue({}) + return {} @defer.inlineCallbacks def update_room_in_group( @@ -574,7 +566,7 @@ class GroupsServerHandler(object): else: raise SynapseError(400, "Uknown config option") - defer.returnValue({}) + return {} @defer.inlineCallbacks def remove_room_from_group(self, group_id, requester_user_id, room_id): @@ -586,7 +578,7 @@ class GroupsServerHandler(object): yield self.store.remove_room_from_group(group_id, room_id) - defer.returnValue({}) + return {} @defer.inlineCallbacks def invite_to_group(self, group_id, user_id, requester_user_id, content): @@ -644,9 +636,9 @@ class GroupsServerHandler(object): ) elif res["state"] == "invite": yield self.store.add_group_invite(group_id, user_id) - defer.returnValue({"state": "invite"}) + return {"state": "invite"} elif res["state"] == "reject": - defer.returnValue({"state": "reject"}) + return {"state": "reject"} else: raise SynapseError(502, "Unknown state returned by HS") @@ -679,7 +671,7 @@ class GroupsServerHandler(object): remote_attestation=remote_attestation, ) - defer.returnValue(local_attestation) + return local_attestation @defer.inlineCallbacks def accept_invite(self, group_id, requester_user_id, content): @@ -699,7 +691,7 @@ class GroupsServerHandler(object): local_attestation = yield self._add_user(group_id, requester_user_id, content) - defer.returnValue({"state": "join", "attestation": local_attestation}) + 
return {"state": "join", "attestation": local_attestation} @defer.inlineCallbacks def join_group(self, group_id, requester_user_id, content): @@ -716,7 +708,7 @@ class GroupsServerHandler(object): local_attestation = yield self._add_user(group_id, requester_user_id, content) - defer.returnValue({"state": "join", "attestation": local_attestation}) + return {"state": "join", "attestation": local_attestation} @defer.inlineCallbacks def knock(self, group_id, requester_user_id, content): @@ -769,7 +761,7 @@ class GroupsServerHandler(object): if not self.hs.is_mine_id(user_id): yield self.store.maybe_delete_remote_profile_cache(user_id) - defer.returnValue({}) + return {} @defer.inlineCallbacks def create_group(self, group_id, requester_user_id, content): @@ -845,7 +837,7 @@ class GroupsServerHandler(object): avatar_url=user_profile.get("avatar_url"), ) - defer.returnValue({"group_id": group_id}) + return {"group_id": group_id} @defer.inlineCallbacks def delete_group(self, group_id, requester_user_id): diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py index e62e6cab77..8acd9f9a83 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py @@ -51,8 +51,8 @@ class AccountDataEventSource(object): {"type": account_data_type, "content": content, "room_id": room_id} ) - defer.returnValue((results, current_stream_id)) + return (results, current_stream_id) @defer.inlineCallbacks def get_pagination_rows(self, user, config, key): - defer.returnValue(([], config.to_id)) + return ([], config.to_id) diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index 1f1708ba7d..930204e2d0 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -193,7 +193,7 @@ class AccountValidityHandler(object): if threepid["medium"] == "email": addresses.append(threepid["address"]) - defer.returnValue(addresses) + return addresses @defer.inlineCallbacks def _get_renewal_token(self, user_id): @@ -214,7 +214,7 @@ class AccountValidityHandler(object): try: renewal_token = stringutils.random_string(32) yield self.store.set_renewal_token_for_user(user_id, renewal_token) - defer.returnValue(renewal_token) + return renewal_token except StoreError: attempts += 1 raise StoreError(500, "Couldn't generate a unique string as refresh string.") @@ -254,4 +254,4 @@ class AccountValidityHandler(object): user_id=user_id, expiration_ts=expiration_ts, email_sent=email_sent ) - defer.returnValue(expiration_ts) + return expiration_ts diff --git a/synapse/handlers/acme.py b/synapse/handlers/acme.py index fbef2f3d38..46ac73106d 100644 --- a/synapse/handlers/acme.py +++ b/synapse/handlers/acme.py @@ -100,4 +100,4 @@ class AcmeHandler(object): logger.exception("Failed saving!") raise - defer.returnValue(True) + return True diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index e8a651e231..2f22f56ca4 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -49,7 +49,7 @@ class AdminHandler(BaseHandler): "devices": {"": {"sessions": [{"connections": connections}]}}, } - defer.returnValue(ret) + return ret @defer.inlineCallbacks def get_users(self): @@ -61,7 +61,7 @@ class AdminHandler(BaseHandler): """ ret = yield self.store.get_users() - defer.returnValue(ret) + return ret @defer.inlineCallbacks def get_users_paginate(self, order, start, limit): @@ -78,7 +78,7 @@ class AdminHandler(BaseHandler): """ ret = yield self.store.get_users_paginate(order, start, limit) - defer.returnValue(ret) + 
return ret @defer.inlineCallbacks def search_users(self, term): @@ -92,7 +92,7 @@ class AdminHandler(BaseHandler): """ ret = yield self.store.search_users(term) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def export_user_data(self, user_id, writer): @@ -225,7 +225,7 @@ class AdminHandler(BaseHandler): state = yield self.store.get_state_for_event(event_id) writer.write_state(room_id, event_id, state) - defer.returnValue(writer.finished()) + return writer.finished() class ExfiltrationWriter(object): diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 8f089f0e33..d1a51df6f9 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -167,8 +167,8 @@ class ApplicationServicesHandler(object): for user_service in user_query_services: is_known_user = yield self.appservice_api.query_user(user_service, user_id) if is_known_user: - defer.returnValue(True) - defer.returnValue(False) + return True + return False @defer.inlineCallbacks def query_room_alias_exists(self, room_alias): @@ -192,7 +192,7 @@ class ApplicationServicesHandler(object): if is_known_alias: # the alias exists now so don't query more ASes. result = yield self.store.get_association_from_room_alias(room_alias) - defer.returnValue(result) + return result @defer.inlineCallbacks def query_3pe(self, kind, protocol, fields): @@ -215,7 +215,7 @@ class ApplicationServicesHandler(object): if success: ret.extend(result) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def get_3pe_protocols(self, only_protocol=None): @@ -254,7 +254,7 @@ class ApplicationServicesHandler(object): for p in protocols.keys(): protocols[p] = _merge_instances(protocols[p]) - defer.returnValue(protocols) + return protocols @defer.inlineCallbacks def _get_services_for_event(self, event): @@ -276,7 +276,7 @@ class ApplicationServicesHandler(object): if (yield s.is_interested(event, self.store)): interested_list.append(s) - defer.returnValue(interested_list) + return interested_list def _get_services_for_user(self, user_id): services = self.store.get_app_services() @@ -293,23 +293,23 @@ class ApplicationServicesHandler(object): if not self.is_mine_id(user_id): # we don't know if they are unknown or not since it isn't one of our # users. We can't poke ASes. - defer.returnValue(False) + return False return user_info = yield self.store.get_user_by_id(user_id) if user_info: - defer.returnValue(False) + return False return # user not found; could be the AS though, so check. 
services = self.store.get_app_services() service_list = [s for s in services if s.sender == user_id] - defer.returnValue(len(service_list) == 0) + return len(service_list) == 0 @defer.inlineCallbacks def _check_user_exists(self, user_id): unknown_user = yield self._is_unknown_user(user_id) if unknown_user: exists = yield self.query_user_exists(user_id) - defer.returnValue(exists) - defer.returnValue(True) + return exists + return True diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index d4d6574975..05be5b7c48 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -155,7 +155,7 @@ class AuthHandler(BaseHandler): if user_id != requester.user.to_string(): raise AuthError(403, "Invalid auth") - defer.returnValue(params) + return params @defer.inlineCallbacks def check_auth(self, flows, clientdict, clientip, password_servlet=False): @@ -280,7 +280,7 @@ class AuthHandler(BaseHandler): creds, list(clientdict), ) - defer.returnValue((creds, clientdict, session["id"])) + return (creds, clientdict, session["id"]) ret = self._auth_dict_for_flows(flows, session) ret["completed"] = list(creds) @@ -307,8 +307,8 @@ class AuthHandler(BaseHandler): if result: creds[stagetype] = result self._save_session(sess) - defer.returnValue(True) - defer.returnValue(False) + return True + return False def get_session_id(self, clientdict): """ @@ -379,7 +379,7 @@ class AuthHandler(BaseHandler): res = yield checker( authdict, clientip=clientip, password_servlet=password_servlet ) - defer.returnValue(res) + return res # build a v1-login-style dict out of the authdict and fall back to the # v1 code @@ -389,7 +389,7 @@ class AuthHandler(BaseHandler): raise SynapseError(400, "", Codes.MISSING_PARAM) (canonical_id, callback) = yield self.validate_login(user_id, authdict) - defer.returnValue(canonical_id) + return canonical_id @defer.inlineCallbacks def _check_recaptcha(self, authdict, clientip, **kwargs): @@ -433,7 +433,7 @@ class AuthHandler(BaseHandler): resp_body.get("hostname"), ) if resp_body["success"]: - defer.returnValue(True) + return True raise LoginError(401, "", errcode=Codes.UNAUTHORIZED) def _check_email_identity(self, authdict, **kwargs): @@ -502,7 +502,7 @@ class AuthHandler(BaseHandler): threepid["threepid_creds"] = authdict["threepid_creds"] - defer.returnValue(threepid) + return threepid def _get_params_recaptcha(self): return {"public_key": self.hs.config.recaptcha_public_key} @@ -606,7 +606,7 @@ class AuthHandler(BaseHandler): yield self.store.delete_access_token(access_token) raise StoreError(400, "Login raced against device deletion") - defer.returnValue(access_token) + return access_token @defer.inlineCallbacks def check_user_exists(self, user_id): @@ -629,8 +629,8 @@ class AuthHandler(BaseHandler): self.ratelimit_login_per_account(user_id) res = yield self._find_user_id_and_pwd_hash(user_id) if res is not None: - defer.returnValue(res[0]) - defer.returnValue(None) + return res[0] + return None @defer.inlineCallbacks def _find_user_id_and_pwd_hash(self, user_id): @@ -661,7 +661,7 @@ class AuthHandler(BaseHandler): user_id, user_infos.keys(), ) - defer.returnValue(result) + return result def get_supported_login_types(self): """Get a the login types supported for the /login API @@ -722,7 +722,7 @@ class AuthHandler(BaseHandler): known_login_type = True is_valid = yield provider.check_password(qualified_user_id, password) if is_valid: - defer.returnValue((qualified_user_id, None)) + return (qualified_user_id, None) if not hasattr(provider, "get_supported_login_types") 
or not hasattr( provider, "check_auth" @@ -756,7 +756,7 @@ class AuthHandler(BaseHandler): if result: if isinstance(result, str): result = (result, None) - defer.returnValue(result) + return result if login_type == LoginType.PASSWORD and self.hs.config.password_localdb_enabled: known_login_type = True @@ -766,7 +766,7 @@ class AuthHandler(BaseHandler): ) if canonical_user_id: - defer.returnValue((canonical_user_id, None)) + return (canonical_user_id, None) if not known_login_type: raise SynapseError(400, "Unknown login type %s" % login_type) @@ -814,9 +814,9 @@ class AuthHandler(BaseHandler): if isinstance(result, str): # If it's a str, set callback function to None result = (result, None) - defer.returnValue(result) + return result - defer.returnValue((None, None)) + return (None, None) @defer.inlineCallbacks def _check_local_password(self, user_id, password): @@ -838,7 +838,7 @@ class AuthHandler(BaseHandler): """ lookupres = yield self._find_user_id_and_pwd_hash(user_id) if not lookupres: - defer.returnValue(None) + return None (user_id, password_hash) = lookupres # If the password hash is None, the account has likely been deactivated @@ -850,8 +850,8 @@ class AuthHandler(BaseHandler): result = yield self.validate_hash(password, password_hash) if not result: logger.warn("Failed password login for user %s", user_id) - defer.returnValue(None) - defer.returnValue(user_id) + return None + return user_id @defer.inlineCallbacks def validate_short_term_login_token_and_get_user_id(self, login_token): @@ -865,7 +865,7 @@ class AuthHandler(BaseHandler): raise AuthError(403, "Invalid token", errcode=Codes.FORBIDDEN) self.ratelimit_login_per_account(user_id) yield self.auth.check_auth_blocking(user_id) - defer.returnValue(user_id) + return user_id @defer.inlineCallbacks def delete_access_token(self, access_token): @@ -976,7 +976,7 @@ class AuthHandler(BaseHandler): ) yield self.store.user_delete_threepid(user_id, medium, address) - defer.returnValue(result) + return result def _save_session(self, session): # TODO: Persistent storage diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index e8f9da6098..5f804d1f13 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -125,7 +125,7 @@ class DeactivateAccountHandler(BaseHandler): # Mark the user as deactivated. 
yield self.store.set_user_deactivated_status(user_id, True) - defer.returnValue(identity_server_supports_unbinding) + return identity_server_supports_unbinding def _start_user_parting(self): """ diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 99e8413092..d6ab337783 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -64,7 +64,7 @@ class DeviceWorkerHandler(BaseHandler): for device in devices: _update_device_from_client_ips(device, ips) - defer.returnValue(devices) + return devices @defer.inlineCallbacks def get_device(self, user_id, device_id): @@ -85,7 +85,7 @@ class DeviceWorkerHandler(BaseHandler): raise errors.NotFoundError ips = yield self.store.get_last_client_ip_by_device(user_id, device_id) _update_device_from_client_ips(device, ips) - defer.returnValue(device) + return device @measure_func("device.get_user_ids_changed") @defer.inlineCallbacks @@ -200,9 +200,7 @@ class DeviceWorkerHandler(BaseHandler): possibly_joined = [] possibly_left = [] - defer.returnValue( - {"changed": list(possibly_joined), "left": list(possibly_left)} - ) + return {"changed": list(possibly_joined), "left": list(possibly_left)} class DeviceHandler(DeviceWorkerHandler): @@ -250,7 +248,7 @@ class DeviceHandler(DeviceWorkerHandler): ) if new_device: yield self.notify_device_update(user_id, [device_id]) - defer.returnValue(device_id) + return device_id # if the device id is not specified, we'll autogen one, but loop a few # times in case of a clash. @@ -264,7 +262,7 @@ class DeviceHandler(DeviceWorkerHandler): ) if new_device: yield self.notify_device_update(user_id, [device_id]) - defer.returnValue(device_id) + return device_id attempts += 1 raise errors.StoreError(500, "Couldn't generate a device ID.") @@ -411,9 +409,7 @@ class DeviceHandler(DeviceWorkerHandler): @defer.inlineCallbacks def on_federation_query_user_devices(self, user_id): stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id) - defer.returnValue( - {"user_id": user_id, "stream_id": stream_id, "devices": devices} - ) + return {"user_id": user_id, "stream_id": stream_id, "devices": devices} @defer.inlineCallbacks def user_left_room(self, user, room_id): @@ -623,7 +619,7 @@ class DeviceListEduUpdater(object): for _, stream_id, prev_ids, _ in updates: if not prev_ids: # We always do a resync if there are no previous IDs - defer.returnValue(True) + return True for prev_id in prev_ids: if prev_id == extremity: @@ -633,8 +629,8 @@ class DeviceListEduUpdater(object): elif prev_id in stream_id_in_updates: continue else: - defer.returnValue(True) + return True stream_id_in_updates.add(stream_id) - defer.returnValue(False) + return False diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 42d5b3db30..0fd423197c 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -210,7 +210,7 @@ class DirectoryHandler(BaseHandler): except AuthError as e: logger.info("Failed to update alias events: %s", e) - defer.returnValue(room_id) + return room_id @defer.inlineCallbacks def delete_appservice_association(self, service, room_alias): @@ -229,7 +229,7 @@ class DirectoryHandler(BaseHandler): room_id = yield self.store.delete_room_alias(room_alias) - defer.returnValue(room_id) + return room_id @defer.inlineCallbacks def get_association(self, room_alias): @@ -277,7 +277,7 @@ class DirectoryHandler(BaseHandler): else: servers = list(servers) - defer.returnValue({"room_id": room_id, "servers": servers}) + return {"room_id": room_id, "servers": 
servers} return @defer.inlineCallbacks @@ -289,7 +289,7 @@ class DirectoryHandler(BaseHandler): result = yield self.get_association_from_room_alias(room_alias) if result is not None: - defer.returnValue({"room_id": result.room_id, "servers": result.servers}) + return {"room_id": result.room_id, "servers": result.servers} else: raise SynapseError( 404, @@ -342,7 +342,7 @@ class DirectoryHandler(BaseHandler): # Query AS to see if it exists as_handler = self.appservice_handler result = yield as_handler.query_room_alias_exists(room_alias) - defer.returnValue(result) + return result def can_modify_alias(self, alias, user_id=None): # Any application service "interested" in an alias they are regexing on @@ -369,10 +369,10 @@ class DirectoryHandler(BaseHandler): creator = yield self.store.get_room_alias_creator(alias.to_string()) if creator is not None and creator == user_id: - defer.returnValue(True) + return True is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id)) - defer.returnValue(is_admin) + return is_admin @defer.inlineCallbacks def edit_published_room_list(self, requester, room_id, visibility): diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index fdfe8611b6..1300b540e3 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -144,7 +144,7 @@ class E2eKeysHandler(object): ) ) - defer.returnValue({"device_keys": results, "failures": failures}) + return {"device_keys": results, "failures": failures} @defer.inlineCallbacks def query_local_devices(self, query): @@ -189,7 +189,7 @@ class E2eKeysHandler(object): r["unsigned"]["device_display_name"] = display_name result_dict[user_id][device_id] = r - defer.returnValue(result_dict) + return result_dict @defer.inlineCallbacks def on_federation_query_client_keys(self, query_body): @@ -197,7 +197,7 @@ class E2eKeysHandler(object): """ device_keys_query = query_body.get("device_keys", {}) res = yield self.query_local_devices(device_keys_query) - defer.returnValue({"device_keys": res}) + return {"device_keys": res} @defer.inlineCallbacks def claim_one_time_keys(self, query, timeout): @@ -259,7 +259,7 @@ class E2eKeysHandler(object): ), ) - defer.returnValue({"one_time_keys": json_result, "failures": failures}) + return {"one_time_keys": json_result, "failures": failures} @defer.inlineCallbacks def upload_keys_for_user(self, user_id, device_id, keys): @@ -297,7 +297,7 @@ class E2eKeysHandler(object): result = yield self.store.count_e2e_one_time_keys(user_id, device_id) - defer.returnValue({"one_time_key_counts": result}) + return {"one_time_key_counts": result} @defer.inlineCallbacks def _upload_one_time_keys_for_user( diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index ebd807bca6..41b871fc59 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -84,7 +84,7 @@ class E2eRoomKeysHandler(object): user_id, version, room_id, session_id ) - defer.returnValue(results) + return results @defer.inlineCallbacks def delete_room_keys(self, user_id, version, room_id=None, session_id=None): @@ -262,7 +262,7 @@ class E2eRoomKeysHandler(object): new_version = yield self.store.create_e2e_room_keys_version( user_id, version_info ) - defer.returnValue(new_version) + return new_version @defer.inlineCallbacks def get_version_info(self, user_id, version=None): @@ -292,7 +292,7 @@ class E2eRoomKeysHandler(object): raise NotFoundError("Unknown backup version") else: raise - defer.returnValue(res) + return res @defer.inlineCallbacks def 
delete_version(self, user_id, version=None): @@ -350,4 +350,4 @@ class E2eRoomKeysHandler(object): user_id, version, version_info ) - defer.returnValue({}) + return {} diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 6a38328af3..2f1f10a9af 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -143,7 +143,7 @@ class EventStreamHandler(BaseHandler): "end": tokens[1].to_string(), } - defer.returnValue(chunk) + return chunk class EventHandler(BaseHandler): @@ -166,7 +166,7 @@ class EventHandler(BaseHandler): event = yield self.store.get_event(event_id, check_room_id=room_id) if not event: - defer.returnValue(None) + return None return users = yield self.store.get_users_in_room(event.room_id) @@ -179,4 +179,4 @@ class EventHandler(BaseHandler): if not filtered: raise AuthError(403, "You don't have permission to access that event.") - defer.returnValue(event) + return event diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 57be968c67..2aa208a2b8 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -210,7 +210,7 @@ class FederationHandler(BaseHandler): event_id, origin, ) - defer.returnValue(None) + return None state = None auth_chain = [] @@ -676,7 +676,7 @@ class FederationHandler(BaseHandler): events = [e for e in events if e.event_id not in seen_events] if not events: - defer.returnValue([]) + return [] event_map = {e.event_id: e for e in events} @@ -838,7 +838,7 @@ class FederationHandler(BaseHandler): # TODO: We can probably do something more clever here. yield self._handle_new_event(dest, event, backfilled=True) - defer.returnValue(events) + return events @defer.inlineCallbacks def maybe_backfill(self, room_id, current_depth): @@ -894,7 +894,7 @@ class FederationHandler(BaseHandler): ) if not filtered_extremities: - defer.returnValue(False) + return False # Check if we reached a point where we should start backfilling. sorted_extremeties_tuple = sorted(extremities.items(), key=lambda e: -int(e[1])) @@ -965,7 +965,7 @@ class FederationHandler(BaseHandler): # If this succeeded then we probably already have the # appropriate stuff. # TODO: We can probably do something more intelligent here. - defer.returnValue(True) + return True except SynapseError as e: logger.info("Failed to backfill from %s because %s", dom, e) continue @@ -985,11 +985,11 @@ class FederationHandler(BaseHandler): logger.exception("Failed to backfill from %s because %s", dom, e) continue - defer.returnValue(False) + return False success = yield try_backfill(likely_domains) if success: - defer.returnValue(True) + return True # Huh, well *those* domains didn't work out. Lets try some domains # from the time. 
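The maybe_backfill hunks show the converted boolean plumbing in context: each try_backfill attempt now resolves to True or False through a plain return, and the first success short-circuits the remaining candidate domains. A hedged sketch of that control flow, with illustrative names:

from twisted.internet import defer

@defer.inlineCallbacks
def backfill_from_first_success(attempt, domains):
    # The first successful attempt wins; later candidates are never tried.
    for domain in domains:
        ok = yield attempt(domain)
        if ok:
            return True   # was: defer.returnValue(True)
    return False          # was: defer.returnValue(False)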
@@ -1031,11 +1031,11 @@ class FederationHandler(BaseHandler): [dom for dom, _ in likely_domains if dom not in tried_domains] ) if success: - defer.returnValue(True) + return True tried_domains.update(dom for dom, _ in likely_domains) - defer.returnValue(False) + return False def _sanity_check_event(self, ev): """ @@ -1082,7 +1082,7 @@ class FederationHandler(BaseHandler): pdu=event, ) - defer.returnValue(pdu) + return pdu @defer.inlineCallbacks def on_event_auth(self, event_id): @@ -1090,7 +1090,7 @@ class FederationHandler(BaseHandler): auth = yield self.store.get_auth_chain( [auth_id for auth_id in event.auth_event_ids()], include_given=True ) - defer.returnValue([e for e in auth]) + return [e for e in auth] @log_function @defer.inlineCallbacks @@ -1177,7 +1177,7 @@ class FederationHandler(BaseHandler): run_in_background(self._handle_queued_pdus, room_queue) - defer.returnValue(True) + return True @defer.inlineCallbacks def _handle_queued_pdus(self, room_queue): @@ -1247,7 +1247,7 @@ class FederationHandler(BaseHandler): room_version, event, context, do_sig_check=False ) - defer.returnValue(event) + return event @defer.inlineCallbacks @log_function @@ -1308,7 +1308,7 @@ class FederationHandler(BaseHandler): state = yield self.store.get_events(list(prev_state_ids.values())) - defer.returnValue({"state": list(state.values()), "auth_chain": auth_chain}) + return {"state": list(state.values()), "auth_chain": auth_chain} @defer.inlineCallbacks def on_invite_request(self, origin, pdu): @@ -1364,7 +1364,7 @@ class FederationHandler(BaseHandler): context = yield self.state_handler.compute_event_context(event) yield self.persist_events_and_notify([(event, context)]) - defer.returnValue(event) + return event @defer.inlineCallbacks def do_remotely_reject_invite(self, target_hosts, room_id, user_id): @@ -1389,7 +1389,7 @@ class FederationHandler(BaseHandler): context = yield self.state_handler.compute_event_context(event) yield self.persist_events_and_notify([(event, context)]) - defer.returnValue(event) + return event @defer.inlineCallbacks def _make_and_verify_event( @@ -1407,7 +1407,7 @@ class FederationHandler(BaseHandler): assert event.user_id == user_id assert event.state_key == user_id assert event.room_id == room_id - defer.returnValue((origin, event, format_ver)) + return (origin, event, format_ver) @defer.inlineCallbacks @log_function @@ -1451,7 +1451,7 @@ class FederationHandler(BaseHandler): logger.warn("Failed to create new leave %r because %s", event, e) raise e - defer.returnValue(event) + return event @defer.inlineCallbacks @log_function @@ -1484,7 +1484,7 @@ class FederationHandler(BaseHandler): event.signatures, ) - defer.returnValue(None) + return None @defer.inlineCallbacks def get_state_for_pdu(self, room_id, event_id): @@ -1512,9 +1512,9 @@ class FederationHandler(BaseHandler): del results[(event.type, event.state_key)] res = list(results.values()) - defer.returnValue(res) + return res else: - defer.returnValue([]) + return [] @defer.inlineCallbacks def get_state_ids_for_pdu(self, room_id, event_id): @@ -1539,9 +1539,9 @@ class FederationHandler(BaseHandler): else: results.pop((event.type, event.state_key), None) - defer.returnValue(list(results.values())) + return list(results.values()) else: - defer.returnValue([]) + return [] @defer.inlineCallbacks @log_function @@ -1554,7 +1554,7 @@ class FederationHandler(BaseHandler): events = yield filter_events_for_server(self.store, origin, events) - defer.returnValue(events) + return events @defer.inlineCallbacks @log_function @@ 
-1584,9 +1584,9 @@ class FederationHandler(BaseHandler): events = yield filter_events_for_server(self.store, origin, [event]) event = events[0] - defer.returnValue(event) + return event else: - defer.returnValue(None) + return None def get_min_depth_for_context(self, context): return self.store.get_min_depth(context) @@ -1618,7 +1618,7 @@ class FederationHandler(BaseHandler): self.store.remove_push_actions_from_staging, event.event_id ) - defer.returnValue(context) + return context @defer.inlineCallbacks def _handle_new_events(self, origin, event_infos, backfilled=False): @@ -1641,7 +1641,7 @@ class FederationHandler(BaseHandler): auth_events=ev_info.get("auth_events"), backfilled=backfilled, ) - defer.returnValue(res) + return res contexts = yield make_deferred_yieldable( defer.gatherResults( @@ -1800,7 +1800,7 @@ class FederationHandler(BaseHandler): if event.type == EventTypes.GuestAccess and not context.rejected: yield self.maybe_kick_guest_users(event) - defer.returnValue(context) + return context @defer.inlineCallbacks def _check_for_soft_fail(self, event, state, backfilled): @@ -1919,7 +1919,7 @@ class FederationHandler(BaseHandler): logger.debug("on_query_auth returning: %s", ret) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def on_get_missing_events( @@ -1942,7 +1942,7 @@ class FederationHandler(BaseHandler): self.store, origin, missing_events ) - defer.returnValue(missing_events) + return missing_events @defer.inlineCallbacks @log_function @@ -2418,16 +2418,14 @@ class FederationHandler(BaseHandler): logger.debug("construct_auth_difference returning") - defer.returnValue( - { - "auth_chain": local_auth, - "rejects": { - e.event_id: {"reason": reason_map[e.event_id], "proof": None} - for e in base_remote_rejected - }, - "missing": [e.event_id for e in missing_locals], - } - ) + return { + "auth_chain": local_auth, + "rejects": { + e.event_id: {"reason": reason_map[e.event_id], "proof": None} + for e in base_remote_rejected + }, + "missing": [e.event_id for e in missing_locals], + } @defer.inlineCallbacks @log_function @@ -2575,7 +2573,7 @@ class FederationHandler(BaseHandler): builder=builder ) EventValidator().validate_new(event) - defer.returnValue((event, context)) + return (event, context) @defer.inlineCallbacks def _check_signature(self, event, context): diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 7da63bb643..7b67c8ae0f 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -162,7 +162,7 @@ class GroupsLocalHandler(object): res.setdefault("user", {})["is_publicised"] = is_publicised - defer.returnValue(res) + return res @defer.inlineCallbacks def create_group(self, group_id, user_id, content): @@ -207,7 +207,7 @@ class GroupsLocalHandler(object): ) self.notifier.on_new_event("groups_key", token, users=[user_id]) - defer.returnValue(res) + return res @defer.inlineCallbacks def get_users_in_group(self, group_id, requester_user_id): @@ -217,7 +217,7 @@ class GroupsLocalHandler(object): res = yield self.groups_server_handler.get_users_in_group( group_id, requester_user_id ) - defer.returnValue(res) + return res group_server_name = get_domain_from_id(group_id) @@ -244,7 +244,7 @@ class GroupsLocalHandler(object): res["chunk"] = valid_entries - defer.returnValue(res) + return res @defer.inlineCallbacks def join_group(self, group_id, user_id, content): @@ -285,7 +285,7 @@ class GroupsLocalHandler(object): ) self.notifier.on_new_event("groups_key", token, users=[user_id]) - 
defer.returnValue({}) + return {} @defer.inlineCallbacks def accept_invite(self, group_id, user_id, content): @@ -326,7 +326,7 @@ class GroupsLocalHandler(object): ) self.notifier.on_new_event("groups_key", token, users=[user_id]) - defer.returnValue({}) + return {} @defer.inlineCallbacks def invite(self, group_id, user_id, requester_user_id, config): @@ -346,7 +346,7 @@ class GroupsLocalHandler(object): content, ) - defer.returnValue(res) + return res @defer.inlineCallbacks def on_invite(self, group_id, user_id, content): @@ -377,7 +377,7 @@ class GroupsLocalHandler(object): logger.warn("No profile for user %s: %s", user_id, e) user_profile = {} - defer.returnValue({"state": "invite", "user_profile": user_profile}) + return {"state": "invite", "user_profile": user_profile} @defer.inlineCallbacks def remove_user_from_group(self, group_id, user_id, requester_user_id, content): @@ -406,7 +406,7 @@ class GroupsLocalHandler(object): content, ) - defer.returnValue(res) + return res @defer.inlineCallbacks def user_removed_from_group(self, group_id, user_id, content): @@ -421,7 +421,7 @@ class GroupsLocalHandler(object): @defer.inlineCallbacks def get_joined_groups(self, user_id): group_ids = yield self.store.get_joined_groups(user_id) - defer.returnValue({"groups": group_ids}) + return {"groups": group_ids} @defer.inlineCallbacks def get_publicised_groups_for_user(self, user_id): @@ -433,14 +433,14 @@ class GroupsLocalHandler(object): for app_service in self.store.get_app_services(): result.extend(app_service.get_groups_for_user(user_id)) - defer.returnValue({"groups": result}) + return {"groups": result} else: bulk_result = yield self.transport_client.bulk_get_publicised_groups( get_domain_from_id(user_id), [user_id] ) result = bulk_result.get("users", {}).get(user_id) # TODO: Verify attestations - defer.returnValue({"groups": result}) + return {"groups": result} @defer.inlineCallbacks def bulk_get_publicised_groups(self, user_ids, proxy=True): @@ -475,4 +475,4 @@ class GroupsLocalHandler(object): for app_service in self.store.get_app_services(): results[uid].extend(app_service.get_groups_for_user(uid)) - defer.returnValue({"users": results}) + return {"users": results} diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 546d6169e9..d199521b58 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -82,7 +82,7 @@ class IdentityHandler(BaseHandler): "%s is not a trusted ID server: rejecting 3pid " + "credentials", id_server, ) - defer.returnValue(None) + return None try: data = yield self.http_client.get_json( @@ -95,8 +95,8 @@ class IdentityHandler(BaseHandler): raise e.to_synapse_error() if "medium" in data: - defer.returnValue(data) - defer.returnValue(None) + return data + return None @defer.inlineCallbacks def bind_threepid(self, creds, mxid): @@ -133,7 +133,7 @@ class IdentityHandler(BaseHandler): ) except CodeMessageException as e: data = json.loads(e.msg) # XXX WAT? 
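The "return data" / "return None" hunk just above shows the single mechanical transformation this patch applies everywhere: on Python 3 a generator may return a value directly, so Twisted's @defer.inlineCallbacks no longer needs defer.returnValue(). A minimal sketch of the two equivalent spellings, assuming nothing beyond Twisted itself:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def old_style():
        yield defer.succeed(None)
        defer.returnValue("value")  # raises internally, so code below never runs

    @defer.inlineCallbacks
    def new_style():
        yield defer.succeed(None)
        return "value"  # Python 3 generators may return values (PEP 380)

    # Both calls produce a Deferred that fires with "value":
    old_style().addCallback(print)
    new_style().addCallback(print)

Because defer.returnValue() works by raising an exception, anything written after it was already unreachable, which is why rewriting it as a plain return preserves behaviour even where two returnValue calls appear back to back, as in the hunk above.
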
- defer.returnValue(data) + return data @defer.inlineCallbacks def try_unbind_threepid(self, mxid, threepid): @@ -161,7 +161,7 @@ class IdentityHandler(BaseHandler): # We don't know where to unbind, so we don't have a choice but to return if not id_servers: - defer.returnValue(False) + return False changed = True for id_server in id_servers: @@ -169,7 +169,7 @@ class IdentityHandler(BaseHandler): mxid, threepid, id_server ) - defer.returnValue(changed) + return changed @defer.inlineCallbacks def try_unbind_threepid_with_id_server(self, mxid, threepid, id_server): @@ -224,7 +224,7 @@ class IdentityHandler(BaseHandler): id_server=id_server, ) - defer.returnValue(changed) + return changed @defer.inlineCallbacks def requestEmailToken( @@ -250,7 +250,7 @@ class IdentityHandler(BaseHandler): % (id_server, "/_matrix/identity/api/v1/validate/email/requestToken"), params, ) - defer.returnValue(data) + return data except HttpResponseException as e: logger.info("Proxied requestToken failed: %r", e) raise e.to_synapse_error() @@ -278,7 +278,7 @@ class IdentityHandler(BaseHandler): % (id_server, "/_matrix/identity/api/v1/validate/msisdn/requestToken"), params, ) - defer.returnValue(data) + return data except HttpResponseException as e: logger.info("Proxied requestToken failed: %r", e) raise e.to_synapse_error() diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 54c966c8a6..42d6650ed9 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -250,7 +250,7 @@ class InitialSyncHandler(BaseHandler): "end": now_token.to_string(), } - defer.returnValue(ret) + return ret @defer.inlineCallbacks def room_initial_sync(self, requester, room_id, pagin_config=None): @@ -301,7 +301,7 @@ class InitialSyncHandler(BaseHandler): result["account_data"] = account_data_events - defer.returnValue(result) + return result @defer.inlineCallbacks def _room_initial_sync_parted( @@ -330,28 +330,24 @@ class InitialSyncHandler(BaseHandler): time_now = self.clock.time_msec() - defer.returnValue( - { - "membership": membership, - "room_id": room_id, - "messages": { - "chunk": ( - yield self._event_serializer.serialize_events( - messages, time_now - ) - ), - "start": start_token.to_string(), - "end": end_token.to_string(), - }, - "state": ( - yield self._event_serializer.serialize_events( - room_state.values(), time_now - ) + return { + "membership": membership, + "room_id": room_id, + "messages": { + "chunk": ( + yield self._event_serializer.serialize_events(messages, time_now) ), - "presence": [], - "receipts": [], - } - ) + "start": start_token.to_string(), + "end": end_token.to_string(), + }, + "state": ( + yield self._event_serializer.serialize_events( + room_state.values(), time_now + ) + ), + "presence": [], + "receipts": [], + } @defer.inlineCallbacks def _room_initial_sync_joined( @@ -384,13 +380,13 @@ class InitialSyncHandler(BaseHandler): def get_presence(): # If presence is disabled, return an empty list if not self.hs.config.use_presence: - defer.returnValue([]) + return [] states = yield presence_handler.get_states( [m.user_id for m in room_members], as_event=True ) - defer.returnValue(states) + return states @defer.inlineCallbacks def get_receipts(): @@ -399,7 +395,7 @@ class InitialSyncHandler(BaseHandler): ) if not receipts: receipts = [] - defer.returnValue(receipts) + return receipts presence, receipts, (messages, token) = yield make_deferred_yieldable( defer.gatherResults( @@ -442,7 +438,7 @@ class InitialSyncHandler(BaseHandler): if not is_peeking: 
ret["membership"] = membership - defer.returnValue(ret) + return ret @defer.inlineCallbacks def _check_in_room_or_world_readable(self, room_id, user_id): @@ -453,7 +449,7 @@ class InitialSyncHandler(BaseHandler): # * The user is a guest user, and has joined the room # else it will throw. member_event = yield self.auth.check_user_was_in_room(room_id, user_id) - defer.returnValue((member_event.membership, member_event.event_id)) + return (member_event.membership, member_event.event_id) return except AuthError: visibility = yield self.state_handler.get_current_state( @@ -463,7 +459,7 @@ class InitialSyncHandler(BaseHandler): visibility and visibility.content["history_visibility"] == "world_readable" ): - defer.returnValue((Membership.JOIN, None)) + return (Membership.JOIN, None) return raise AuthError( 403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 6d7a987f13..8b27e23378 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -87,7 +87,7 @@ class MessageHandler(object): ) data = room_state[membership_event_id].get(key) - defer.returnValue(data) + return data @defer.inlineCallbacks def get_state_events( @@ -174,7 +174,7 @@ class MessageHandler(object): # events, as clients won't use them. bundle_aggregations=False, ) - defer.returnValue(events) + return events @defer.inlineCallbacks def get_joined_members(self, requester, room_id): @@ -213,15 +213,13 @@ class MessageHandler(object): # Loop fell through, AS has no interested users in room raise AuthError(403, "Appservice not in room") - defer.returnValue( - { - user_id: { - "avatar_url": profile.avatar_url, - "display_name": profile.display_name, - } - for user_id, profile in iteritems(users_with_profile) + return { + user_id: { + "avatar_url": profile.avatar_url, + "display_name": profile.display_name, } - ) + for user_id, profile in iteritems(users_with_profile) + } class EventCreationHandler(object): @@ -398,7 +396,7 @@ class EventCreationHandler(object): self.validator.validate_new(event) - defer.returnValue((event, context)) + return (event, context) def _is_exempt_from_privacy_policy(self, builder, requester): """"Determine if an event to be sent is exempt from having to consent @@ -425,9 +423,9 @@ class EventCreationHandler(object): @defer.inlineCallbacks def _is_server_notices_room(self, room_id): if self.config.server_notices_mxid is None: - defer.returnValue(False) + return False user_ids = yield self.store.get_users_in_room(room_id) - defer.returnValue(self.config.server_notices_mxid in user_ids) + return self.config.server_notices_mxid in user_ids @defer.inlineCallbacks def assert_accepted_privacy_policy(self, requester): @@ -507,7 +505,7 @@ class EventCreationHandler(object): event.event_id, prev_state.event_id, ) - defer.returnValue(prev_state) + return prev_state yield self.handle_new_client_event( requester=requester, event=event, context=context, ratelimit=ratelimit @@ -531,7 +529,7 @@ class EventCreationHandler(object): prev_content = encode_canonical_json(prev_event.content) next_content = encode_canonical_json(event.content) if prev_content == next_content: - defer.returnValue(prev_event) + return prev_event return @defer.inlineCallbacks @@ -563,7 +561,7 @@ class EventCreationHandler(object): yield self.send_nonmember_event( requester, event, context, ratelimit=ratelimit ) - defer.returnValue(event) + return event @measure_func("create_new_client_event") @defer.inlineCallbacks @@ -626,7 +624,7 @@ class 
EventCreationHandler(object): logger.debug("Created event %s", event.event_id) - defer.returnValue((event, context)) + return (event, context) @measure_func("handle_new_client_event") @defer.inlineCallbacks diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 20bcfed334..d83aab3f74 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -242,13 +242,11 @@ class PaginationHandler(object): ) if not events: - defer.returnValue( - { - "chunk": [], - "start": pagin_config.from_token.to_string(), - "end": next_token.to_string(), - } - ) + return { + "chunk": [], + "start": pagin_config.from_token.to_string(), + "end": next_token.to_string(), + } state = None if event_filter and event_filter.lazy_load_members() and len(events) > 0: @@ -286,4 +284,4 @@ class PaginationHandler(object): ) ) - defer.returnValue(chunk) + return chunk diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 6f3537e435..ea54d0b991 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -461,7 +461,7 @@ class PresenceHandler(object): if affect_presence: run_in_background(_end) - defer.returnValue(_user_syncing()) + return _user_syncing() def get_currently_syncing_users(self): """Get the set of user ids that are currently syncing on this HS. @@ -556,7 +556,7 @@ class PresenceHandler(object): """Get the current presence state for a user. """ res = yield self.current_state_for_users([user_id]) - defer.returnValue(res[user_id]) + return res[user_id] @defer.inlineCallbacks def current_state_for_users(self, user_ids): @@ -585,7 +585,7 @@ class PresenceHandler(object): states.update(new) self.user_to_current_state.update(new) - defer.returnValue(states) + return states @defer.inlineCallbacks def _persist_and_notify(self, states): @@ -681,7 +681,7 @@ class PresenceHandler(object): def get_state(self, target_user, as_event=False): results = yield self.get_states([target_user.to_string()], as_event=as_event) - defer.returnValue(results[0]) + return results[0] @defer.inlineCallbacks def get_states(self, target_user_ids, as_event=False): @@ -703,17 +703,15 @@ class PresenceHandler(object): now = self.clock.time_msec() if as_event: - defer.returnValue( - [ - { - "type": "m.presence", - "content": format_user_presence_state(state, now), - } - for state in updates - ] - ) + return [ + { + "type": "m.presence", + "content": format_user_presence_state(state, now), + } + for state in updates + ] else: - defer.returnValue(updates) + return updates @defer.inlineCallbacks def set_state(self, target_user, state, ignore_status_msg=False): @@ -757,9 +755,9 @@ class PresenceHandler(object): ) if observer_room_ids & observed_room_ids: - defer.returnValue(True) + return True - defer.returnValue(False) + return False @defer.inlineCallbacks def get_all_presence_updates(self, last_id, current_id): @@ -778,7 +776,7 @@ class PresenceHandler(object): # TODO(markjh): replicate the unpersisted changes. # This could use the in-memory stores for recent changes. rows = yield self.store.get_all_presence_updates(last_id, current_id) - defer.returnValue(rows) + return rows def notify_new_event(self): """Called when new events have happened. Handles users and servers @@ -1034,7 +1032,7 @@ class PresenceEventSource(object): # # Hence this guard where we just return nothing so that the sync # doesn't return. C.f. #5503. 
- defer.returnValue(([], max_token)) + return ([], max_token) presence = self.get_presence_handler() stream_change_cache = self.store.presence_stream_cache @@ -1068,17 +1066,11 @@ class PresenceEventSource(object): updates = yield presence.current_state_for_users(user_ids_changed) if include_offline: - defer.returnValue((list(updates.values()), max_token)) + return (list(updates.values()), max_token) else: - defer.returnValue( - ( - [ - s - for s in itervalues(updates) - if s.state != PresenceState.OFFLINE - ], - max_token, - ) + return ( + [s for s in itervalues(updates) if s.state != PresenceState.OFFLINE], + max_token, ) def get_current_key(self): @@ -1107,7 +1099,7 @@ class PresenceEventSource(object): ) users_interested_in.update(user_ids) - defer.returnValue(users_interested_in) + return users_interested_in def handle_timeouts(user_states, is_mine_fn, syncing_user_ids, now): @@ -1287,7 +1279,7 @@ def get_interested_parties(store, states): # Always notify self users_to_states.setdefault(state.user_id, []).append(state) - defer.returnValue((room_ids_to_states, users_to_states)) + return (room_ids_to_states, users_to_states) @defer.inlineCallbacks @@ -1321,4 +1313,4 @@ def get_interested_remotes(store, states, state_handler): host = get_domain_from_id(user_id) hosts_and_states.append(([host], states)) - defer.returnValue(hosts_and_states) + return hosts_and_states diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index a2388a7091..2cc237e6a5 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -73,7 +73,7 @@ class BaseProfileHandler(BaseHandler): raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) raise - defer.returnValue({"displayname": displayname, "avatar_url": avatar_url}) + return {"displayname": displayname, "avatar_url": avatar_url} else: try: result = yield self.federation.make_query( @@ -82,7 +82,7 @@ class BaseProfileHandler(BaseHandler): args={"user_id": user_id}, ignore_backoff=True, ) - defer.returnValue(result) + return result except RequestSendFailed as e: raise_from(SynapseError(502, "Failed to fetch profile"), e) except HttpResponseException as e: @@ -108,10 +108,10 @@ class BaseProfileHandler(BaseHandler): raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) raise - defer.returnValue({"displayname": displayname, "avatar_url": avatar_url}) + return {"displayname": displayname, "avatar_url": avatar_url} else: profile = yield self.store.get_from_remote_profile_cache(user_id) - defer.returnValue(profile or {}) + return profile or {} @defer.inlineCallbacks def get_displayname(self, target_user): @@ -125,7 +125,7 @@ class BaseProfileHandler(BaseHandler): raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) raise - defer.returnValue(displayname) + return displayname else: try: result = yield self.federation.make_query( @@ -139,7 +139,7 @@ class BaseProfileHandler(BaseHandler): except HttpResponseException as e: raise e.to_synapse_error() - defer.returnValue(result["displayname"]) + return result["displayname"] @defer.inlineCallbacks def set_displayname(self, target_user, requester, new_displayname, by_admin=False): @@ -186,7 +186,7 @@ class BaseProfileHandler(BaseHandler): if e.code == 404: raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) raise - defer.returnValue(avatar_url) + return avatar_url else: try: result = yield self.federation.make_query( @@ -200,7 +200,7 @@ class BaseProfileHandler(BaseHandler): except HttpResponseException as e: raise e.to_synapse_error() - 
defer.returnValue(result["avatar_url"]) + return result["avatar_url"] @defer.inlineCallbacks def set_avatar_url(self, target_user, requester, new_avatar_url, by_admin=False): @@ -251,7 +251,7 @@ class BaseProfileHandler(BaseHandler): raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) raise - defer.returnValue(response) + return response @defer.inlineCallbacks def _update_join_states(self, requester, target_user): diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index a85dd8cdee..218d60f0c3 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -84,7 +84,7 @@ class ReceiptsHandler(BaseHandler): if min_batch_id is None: # no new receipts - defer.returnValue(False) + return False affected_room_ids = list(set([r.room_id for r in receipts])) @@ -94,7 +94,7 @@ class ReceiptsHandler(BaseHandler): min_batch_id, max_batch_id, affected_room_ids ) - defer.returnValue(True) + return True @defer.inlineCallbacks def received_client_receipt(self, room_id, receipt_type, user_id, event_id): @@ -124,9 +124,9 @@ class ReceiptsHandler(BaseHandler): ) if not result: - defer.returnValue([]) + return [] - defer.returnValue(result) + return result class ReceiptEventSource(object): @@ -139,13 +139,13 @@ class ReceiptEventSource(object): to_key = yield self.get_current_key() if from_key == to_key: - defer.returnValue(([], to_key)) + return ([], to_key) events = yield self.store.get_linearized_receipts_for_rooms( room_ids, from_key=from_key, to_key=to_key ) - defer.returnValue((events, to_key)) + return (events, to_key) def get_current_key(self, direction="f"): return self.store.get_max_receipt_stream_id() @@ -164,4 +164,4 @@ class ReceiptEventSource(object): room_ids, from_key=from_key, to_key=to_key ) - defer.returnValue((events, to_key)) + return (events, to_key) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index bb7cfd71b9..4631fab94e 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -265,7 +265,7 @@ class RegistrationHandler(BaseHandler): # Bind email to new account yield self._register_email_threepid(user_id, threepid_dict, None, False) - defer.returnValue(user_id) + return user_id @defer.inlineCallbacks def _auto_join_rooms(self, user_id): @@ -360,7 +360,7 @@ class RegistrationHandler(BaseHandler): appservice_id=service_id, create_profile_with_displayname=user.localpart, ) - defer.returnValue(user_id) + return user_id @defer.inlineCallbacks def check_recaptcha(self, ip, private_key, challenge, response): @@ -461,7 +461,7 @@ class RegistrationHandler(BaseHandler): id = self._next_generated_user_id self._next_generated_user_id += 1 - defer.returnValue(str(id)) + return str(id) @defer.inlineCallbacks def _validate_captcha(self, ip_addr, private_key, challenge, response): @@ -481,7 +481,7 @@ class RegistrationHandler(BaseHandler): "error_url": "http://www.recaptcha.net/recaptcha/api/challenge?" 
+ "error=%s" % lines[1], } - defer.returnValue(json) + return json @defer.inlineCallbacks def _submit_captcha(self, ip_addr, private_key, challenge, response): @@ -497,7 +497,7 @@ class RegistrationHandler(BaseHandler): "response": response, }, ) - defer.returnValue(data) + return data @defer.inlineCallbacks def _join_user_to_room(self, requester, room_identifier): @@ -622,7 +622,7 @@ class RegistrationHandler(BaseHandler): initial_display_name=initial_display_name, is_guest=is_guest, ) - defer.returnValue((r["device_id"], r["access_token"])) + return (r["device_id"], r["access_token"]) valid_until_ms = None if self.session_lifetime is not None: @@ -645,7 +645,7 @@ class RegistrationHandler(BaseHandler): user_id, device_id=device_id, valid_until_ms=valid_until_ms ) - defer.returnValue((device_id, access_token)) + return (device_id, access_token) @defer.inlineCallbacks def post_registration_actions( @@ -798,7 +798,7 @@ class RegistrationHandler(BaseHandler): if ex.errcode == Codes.MISSING_PARAM: # This will only happen if the ID server returns a malformed response logger.info("Can't add incomplete 3pid") - defer.returnValue(None) + return None raise yield self._auth_handler.add_threepid( diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index db3f8cb76b..5caa90c3b7 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -128,7 +128,7 @@ class RoomCreationHandler(BaseHandler): old_room_id, new_version, # args for _upgrade_room ) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def _upgrade_room(self, requester, old_room_id, new_version): @@ -193,7 +193,7 @@ class RoomCreationHandler(BaseHandler): requester, old_room_id, new_room_id, old_room_state ) - defer.returnValue(new_room_id) + return new_room_id @defer.inlineCallbacks def _update_upgraded_room_pls( @@ -671,7 +671,7 @@ class RoomCreationHandler(BaseHandler): result["room_alias"] = room_alias.to_string() yield directory_handler.send_room_alias_update_event(requester, room_id) - defer.returnValue(result) + return result @defer.inlineCallbacks def _send_events_for_new_room( @@ -796,7 +796,7 @@ class RoomCreationHandler(BaseHandler): room_creator_user_id=creator_id, is_public=is_public, ) - defer.returnValue(gen_room_id) + return gen_room_id except StoreError: attempts += 1 raise StoreError(500, "Couldn't generate a room ID.") @@ -839,7 +839,7 @@ class RoomContextHandler(object): event_id, get_prev_content=True, allow_none=True ) if not event: - defer.returnValue(None) + return None return filtered = yield (filter_evts([event])) @@ -890,7 +890,7 @@ class RoomContextHandler(object): results["end"] = token.copy_and_replace("room_key", results["end"]).to_string() - defer.returnValue(results) + return results class RoomEventSource(object): @@ -941,7 +941,7 @@ class RoomEventSource(object): else: end_key = to_key - defer.returnValue((events, end_key)) + return (events, end_key) def get_current_key(self): return self.store.get_room_events_max_id() @@ -959,4 +959,4 @@ class RoomEventSource(object): limit=config.limit, ) - defer.returnValue((events, next_key)) + return (events, next_key) diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index aae696a7e8..e9094ad02b 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -325,7 +325,7 @@ class RoomListHandler(BaseHandler): current_limit=since_token.current_limit - 1, ).to_token() - defer.returnValue(results) + return results @defer.inlineCallbacks def _append_room_entry_to_chunk( @@ -420,7 +420,7 @@ class 
RoomListHandler(BaseHandler): if join_rules_event: join_rule = join_rules_event.content.get("join_rule", None) if not allow_private and join_rule and join_rule != JoinRules.PUBLIC: - defer.returnValue(None) + return None # Return whether this room is open to federation users or not create_event = current_state.get((EventTypes.Create, "")) @@ -469,7 +469,7 @@ class RoomListHandler(BaseHandler): if avatar_url: result["avatar_url"] = avatar_url - defer.returnValue(result) + return result @defer.inlineCallbacks def get_remote_public_room_list( @@ -482,7 +482,7 @@ class RoomListHandler(BaseHandler): third_party_instance_id=None, ): if not self.enable_room_list_search: - defer.returnValue({"chunk": [], "total_room_count_estimate": 0}) + return {"chunk": [], "total_room_count_estimate": 0} if search_filter: # We currently don't support searching across federation, so we have @@ -507,7 +507,7 @@ class RoomListHandler(BaseHandler): ] } - defer.returnValue(res) + return res def _get_remote_list_cached( self, diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index e0196ef83e..baea08ddd0 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -191,7 +191,7 @@ class RoomMemberHandler(object): ) if duplicate is not None: # Discard the new event since this membership change is a no-op. - defer.returnValue(duplicate) + return duplicate yield self.event_creation_handler.handle_new_client_event( requester, event, context, extra_users=[target], ratelimit=ratelimit @@ -233,7 +233,7 @@ class RoomMemberHandler(object): if prev_member_event.membership == Membership.JOIN: yield self._user_left_room(target, room_id) - defer.returnValue(event) + return event @defer.inlineCallbacks def copy_room_tags_and_direct_to_room(self, old_room_id, new_room_id, user_id): @@ -303,7 +303,7 @@ class RoomMemberHandler(object): require_consent=require_consent, ) - defer.returnValue(result) + return result @defer.inlineCallbacks def _update_membership( @@ -423,7 +423,7 @@ class RoomMemberHandler(object): same_membership = old_membership == effective_membership_state same_sender = requester.user.to_string() == old_state.sender if same_sender and same_membership and same_content: - defer.returnValue(old_state) + return old_state if old_membership in ["ban", "leave"] and action == "kick": raise AuthError(403, "The target user is not in the room") @@ -473,7 +473,7 @@ class RoomMemberHandler(object): ret = yield self._remote_join( requester, remote_room_hosts, room_id, target, content ) - defer.returnValue(ret) + return ret elif effective_membership_state == Membership.LEAVE: if not is_host_in_room: @@ -495,7 +495,7 @@ class RoomMemberHandler(object): res = yield self._remote_reject_invite( requester, remote_room_hosts, room_id, target ) - defer.returnValue(res) + return res res = yield self._local_membership_update( requester=requester, @@ -508,7 +508,7 @@ class RoomMemberHandler(object): content=content, require_consent=require_consent, ) - defer.returnValue(res) + return res @defer.inlineCallbacks def send_membership_event( @@ -596,11 +596,11 @@ class RoomMemberHandler(object): """ guest_access_id = current_state_ids.get((EventTypes.GuestAccess, ""), None) if not guest_access_id: - defer.returnValue(False) + return False guest_access = yield self.store.get_event(guest_access_id) - defer.returnValue( + return ( guest_access and guest_access.content and "guest_access" in guest_access.content @@ -635,7 +635,7 @@ class RoomMemberHandler(object): servers.remove(room_alias.domain) 
servers.insert(0, room_alias.domain) - defer.returnValue((RoomID.from_string(room_id), servers)) + return (RoomID.from_string(room_id), servers) @defer.inlineCallbacks def _get_inviter(self, user_id, room_id): @@ -643,7 +643,7 @@ class RoomMemberHandler(object): user_id=user_id, room_id=room_id ) if invite: - defer.returnValue(UserID.from_string(invite.sender)) + return UserID.from_string(invite.sender) @defer.inlineCallbacks def do_3pid_invite( @@ -708,11 +708,11 @@ class RoomMemberHandler(object): if "signatures" not in data: raise AuthError(401, "No signatures on 3pid binding") yield self._verify_any_signature(data, id_server) - defer.returnValue(data["mxid"]) + return data["mxid"] except IOError as e: logger.warn("Error from identity server lookup: %s" % (e,)) - defer.returnValue(None) + return None @defer.inlineCallbacks def _verify_any_signature(self, data, server_hostname): @@ -904,7 +904,7 @@ class RoomMemberHandler(object): if not public_keys: public_keys.append(fallback_public_key) display_name = data["display_name"] - defer.returnValue((token, public_keys, fallback_public_key, display_name)) + return (token, public_keys, fallback_public_key, display_name) @defer.inlineCallbacks def _is_host_in_room(self, current_state_ids): @@ -913,7 +913,7 @@ class RoomMemberHandler(object): create_event_id = current_state_ids.get(("m.room.create", "")) if len(current_state_ids) == 1 and create_event_id: # We can only get here if we're in the process of creating the room - defer.returnValue(True) + return True for etype, state_key in current_state_ids: if etype != EventTypes.Member or not self.hs.is_mine_id(state_key): @@ -925,16 +925,16 @@ class RoomMemberHandler(object): continue if event.membership == Membership.JOIN: - defer.returnValue(True) + return True - defer.returnValue(False) + return False @defer.inlineCallbacks def _is_server_notice_room(self, room_id): if self._server_notices_mxid is None: - defer.returnValue(False) + return False user_ids = yield self.store.get_users_in_room(room_id) - defer.returnValue(self._server_notices_mxid in user_ids) + return self._server_notices_mxid in user_ids class RoomMemberMasterHandler(RoomMemberHandler): @@ -978,7 +978,7 @@ class RoomMemberMasterHandler(RoomMemberHandler): ret = yield fed_handler.do_remotely_reject_invite( remote_room_hosts, room_id, target.to_string() ) - defer.returnValue(ret) + return ret except Exception as e: # if we were unable to reject the exception, just mark # it as rejected on our end and plough ahead. 
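do_remotely_reject_invite above keeps its fallback shape under this patch: try the federation-level rejection first, and on any failure mark the invite rejected locally (next hunk) and return an empty dict. A rough sketch of that shape, with remote_call and local_fallback as hypothetical stand-ins for the federation handler and store calls:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def reject_with_fallback(remote_call, local_fallback):
        try:
            ret = yield remote_call()
            return ret
        except Exception as e:
            # A failed Deferred surfaces as an exception raised at the
            # yield, so a plain try/except covers the remote failure.
            print("remote rejection failed (%r); marking locally" % (e,))
            yield local_fallback()
            return {}
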
@@ -989,7 +989,7 @@ class RoomMemberMasterHandler(RoomMemberHandler): logger.warn("Failed to reject invite: %s", e) yield self.store.locally_reject_invite(target.to_string(), room_id) - defer.returnValue({}) + return {} def _user_joined_room(self, target, room_id): """Implements RoomMemberHandler._user_joined_room diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py index fc873a3ba6..75e96ae1a2 100644 --- a/synapse/handlers/room_member_worker.py +++ b/synapse/handlers/room_member_worker.py @@ -53,7 +53,7 @@ class RoomMemberWorkerHandler(RoomMemberHandler): yield self._user_joined_room(user, room_id) - defer.returnValue(ret) + return ret def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target): """Implements RoomMemberHandler._remote_reject_invite diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index ddc4430d03..cd5e90bacb 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -69,7 +69,7 @@ class SearchHandler(BaseHandler): # Scan through the old room for further predecessors room_id = predecessor["room_id"] - defer.returnValue(historical_room_ids) + return historical_room_ids @defer.inlineCallbacks def search(self, user, content, batch=None): @@ -186,13 +186,11 @@ class SearchHandler(BaseHandler): room_ids.intersection_update({batch_group_key}) if not room_ids: - defer.returnValue( - { - "search_categories": { - "room_events": {"results": [], "count": 0, "highlights": []} - } + return { + "search_categories": { + "room_events": {"results": [], "count": 0, "highlights": []} } - ) + } rank_map = {} # event_id -> rank of event allowed_events = [] @@ -455,4 +453,4 @@ class SearchHandler(BaseHandler): if global_next_batch: rooms_cat_res["next_batch"] = global_next_batch - defer.returnValue({"search_categories": {"room_events": rooms_cat_res}}) + return {"search_categories": {"room_events": rooms_cat_res}} diff --git a/synapse/handlers/state_deltas.py b/synapse/handlers/state_deltas.py index 6b364befd5..f065970c40 100644 --- a/synapse/handlers/state_deltas.py +++ b/synapse/handlers/state_deltas.py @@ -48,7 +48,7 @@ class StateDeltasHandler(object): if not event and not prev_event: logger.debug("Neither event exists: %r %r", prev_event_id, event_id) - defer.returnValue(None) + return None prev_value = None value = None @@ -62,8 +62,8 @@ class StateDeltasHandler(object): logger.debug("prev_value: %r -> value: %r", prev_value, value) if value == public_value and prev_value != public_value: - defer.returnValue(True) + return True elif value != public_value and prev_value == public_value: - defer.returnValue(False) + return False else: - defer.returnValue(None) + return None diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index a0ee8db988..4449da6669 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -86,7 +86,7 @@ class StatsHandler(StateDeltasHandler): # If still None then the initial background update hasn't happened yet if self.pos is None: - defer.returnValue(None) + return None # Loop round handling deltas until we're up to date while True: @@ -328,6 +328,6 @@ class StatsHandler(StateDeltasHandler): == "world_readable" ) ): - defer.returnValue(True) + return True else: - defer.returnValue(False) + return False diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index cd1ac0a27a..4007284e5b 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -263,7 +263,7 @@ class SyncHandler(object): timeout, full_state, ) - 
defer.returnValue(res) + return res @defer.inlineCallbacks def _wait_for_sync_for_user(self, sync_config, since_token, timeout, full_state): @@ -303,7 +303,7 @@ class SyncHandler(object): lazy_loaded = "false" non_empty_sync_counter.labels(sync_type, lazy_loaded).inc() - defer.returnValue(result) + return result def current_sync_for_user(self, sync_config, since_token=None, full_state=False): """Get the sync for client needed to match what the server has now. @@ -317,7 +317,7 @@ class SyncHandler(object): user_id = user.to_string() rules = yield self.store.get_push_rules_for_user(user_id) rules = format_push_rules_for_user(user, rules) - defer.returnValue(rules) + return rules @defer.inlineCallbacks def ephemeral_by_room(self, sync_result_builder, now_token, since_token=None): @@ -378,7 +378,7 @@ class SyncHandler(object): event_copy = {k: v for (k, v) in iteritems(event) if k != "room_id"} ephemeral_by_room.setdefault(room_id, []).append(event_copy) - defer.returnValue((now_token, ephemeral_by_room)) + return (now_token, ephemeral_by_room) @defer.inlineCallbacks def _load_filtered_recents( @@ -426,8 +426,8 @@ class SyncHandler(object): recents = [] if not limited or block_all_timeline: - defer.returnValue( - TimelineBatch(events=recents, prev_batch=now_token, limited=False) + return TimelineBatch( + events=recents, prev_batch=now_token, limited=False ) filtering_factor = 2 @@ -490,12 +490,10 @@ class SyncHandler(object): prev_batch_token = now_token.copy_and_replace("room_key", room_key) - defer.returnValue( - TimelineBatch( - events=recents, - prev_batch=prev_batch_token, - limited=limited or newly_joined_room, - ) + return TimelineBatch( + events=recents, + prev_batch=prev_batch_token, + limited=limited or newly_joined_room, ) @defer.inlineCallbacks @@ -517,7 +515,7 @@ class SyncHandler(object): if event.is_state(): state_ids = state_ids.copy() state_ids[(event.type, event.state_key)] = event.event_id - defer.returnValue(state_ids) + return state_ids @defer.inlineCallbacks def get_state_at(self, room_id, stream_position, state_filter=StateFilter.all()): @@ -549,7 +547,7 @@ class SyncHandler(object): else: # no events in this room - so presumably no state state = {} - defer.returnValue(state) + return state @defer.inlineCallbacks def compute_summary(self, room_id, sync_config, batch, state, now_token): @@ -579,7 +577,7 @@ class SyncHandler(object): ) if not last_events: - defer.returnValue(None) + return None return last_event = last_events[-1] @@ -611,14 +609,14 @@ class SyncHandler(object): if name_id: name = yield self.store.get_event(name_id, allow_none=True) if name and name.content.get("name"): - defer.returnValue(summary) + return summary if canonical_alias_id: canonical_alias = yield self.store.get_event( canonical_alias_id, allow_none=True ) if canonical_alias and canonical_alias.content.get("alias"): - defer.returnValue(summary) + return summary me = sync_config.user.to_string() @@ -652,7 +650,7 @@ class SyncHandler(object): summary["m.heroes"] = sorted([user_id for user_id in gone_user_ids])[0:5] if not sync_config.filter_collection.lazy_load_members(): - defer.returnValue(summary) + return summary # ensure we send membership events for heroes if needed cache_key = (sync_config.user.to_string(), sync_config.device_id) @@ -686,7 +684,7 @@ class SyncHandler(object): cache.set(s.state_key, s.event_id) state[(EventTypes.Member, s.state_key)] = s - defer.returnValue(summary) + return summary def get_lazy_loaded_members_cache(self, cache_key): cache = 
self.lazy_loaded_members_cache.get(cache_key) @@ -871,14 +869,12 @@ class SyncHandler(object): if state_ids: state = yield self.store.get_events(list(state_ids.values())) - defer.returnValue( - { - (e.type, e.state_key): e - for e in sync_config.filter_collection.filter_room_state( - list(state.values()) - ) - } - ) + return { + (e.type, e.state_key): e + for e in sync_config.filter_collection.filter_room_state( + list(state.values()) + ) + } @defer.inlineCallbacks def unread_notifs_for_room_id(self, room_id, sync_config): @@ -894,11 +890,11 @@ class SyncHandler(object): notifs = yield self.store.get_unread_event_push_actions_by_room_for_user( room_id, sync_config.user.to_string(), last_unread_event_id ) - defer.returnValue(notifs) + return notifs # There is no new information in this period, so your notification # count is whatever it was last time. - defer.returnValue(None) + return None @defer.inlineCallbacks def generate_sync_result(self, sync_config, since_token=None, full_state=False): @@ -989,19 +985,17 @@ class SyncHandler(object): "Sync result for newly joined room %s: %r", room_id, joined_room ) - defer.returnValue( - SyncResult( - presence=sync_result_builder.presence, - account_data=sync_result_builder.account_data, - joined=sync_result_builder.joined, - invited=sync_result_builder.invited, - archived=sync_result_builder.archived, - to_device=sync_result_builder.to_device, - device_lists=device_lists, - groups=sync_result_builder.groups, - device_one_time_keys_count=one_time_key_counts, - next_batch=sync_result_builder.now_token, - ) + return SyncResult( + presence=sync_result_builder.presence, + account_data=sync_result_builder.account_data, + joined=sync_result_builder.joined, + invited=sync_result_builder.invited, + archived=sync_result_builder.archived, + to_device=sync_result_builder.to_device, + device_lists=device_lists, + groups=sync_result_builder.groups, + device_one_time_keys_count=one_time_key_counts, + next_batch=sync_result_builder.now_token, ) @measure_func("_generate_sync_entry_for_groups") @@ -1124,11 +1118,9 @@ class SyncHandler(object): # Remove any users that we still share a room with. 
newly_left_users -= users_who_share_room - defer.returnValue( - DeviceLists(changed=users_that_have_changed, left=newly_left_users) - ) + return DeviceLists(changed=users_that_have_changed, left=newly_left_users) else: - defer.returnValue(DeviceLists(changed=[], left=[])) + return DeviceLists(changed=[], left=[]) @defer.inlineCallbacks def _generate_sync_entry_for_to_device(self, sync_result_builder): @@ -1225,7 +1217,7 @@ class SyncHandler(object): sync_result_builder.account_data = account_data_for_user - defer.returnValue(account_data_by_room) + return account_data_by_room @defer.inlineCallbacks def _generate_sync_entry_for_presence( @@ -1325,7 +1317,7 @@ class SyncHandler(object): ) if not tags_by_room: logger.debug("no-oping sync") - defer.returnValue(([], [], [], [])) + return ([], [], [], []) ignored_account_data = yield self.store.get_global_account_data_by_type_for_user( "m.ignored_user_list", user_id=user_id @@ -1388,13 +1380,11 @@ class SyncHandler(object): newly_left_users -= newly_joined_or_invited_users - defer.returnValue( - ( - newly_joined_rooms, - newly_joined_or_invited_users, - newly_left_rooms, - newly_left_users, - ) + return ( + newly_joined_rooms, + newly_joined_or_invited_users, + newly_left_rooms, + newly_left_users, ) @defer.inlineCallbacks @@ -1414,13 +1404,13 @@ class SyncHandler(object): ) if rooms_changed: - defer.returnValue(True) + return True stream_id = RoomStreamToken.parse_stream_token(since_token.room_key).stream for room_id in sync_result_builder.joined_room_ids: if self.store.has_room_changed_since(room_id, stream_id): - defer.returnValue(True) - defer.returnValue(False) + return True + return False @defer.inlineCallbacks def _get_rooms_changed(self, sync_result_builder, ignored_users): @@ -1637,7 +1627,7 @@ class SyncHandler(object): ) room_entries.append(entry) - defer.returnValue((room_entries, invited, newly_joined_rooms, newly_left_rooms)) + return (room_entries, invited, newly_joined_rooms, newly_left_rooms) @defer.inlineCallbacks def _get_all_rooms(self, sync_result_builder, ignored_users): @@ -1711,7 +1701,7 @@ class SyncHandler(object): ) ) - defer.returnValue((room_entries, invited, [])) + return (room_entries, invited, []) @defer.inlineCallbacks def _generate_room_entry( @@ -1912,7 +1902,7 @@ class SyncHandler(object): joined_room_ids.add(room_id) joined_room_ids = frozenset(joined_room_ids) - defer.returnValue(joined_room_ids) + return joined_room_ids def _action_has_highlight(actions): diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index c3e0c8fc7e..6b661aa93d 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -140,7 +140,7 @@ class TypingHandler(object): if was_present: # No point sending another notification - defer.returnValue(None) + return None self._push_update(member=member, typing=True) @@ -173,7 +173,7 @@ class TypingHandler(object): def _stopped_typing(self, member): if member.user_id not in self._room_typing.get(member.room_id, set()): # No point - defer.returnValue(None) + return None self._member_typing_until.pop(member, None) self._member_last_federation_poke.pop(member, None) diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 5de9630950..e53669e40d 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -133,7 +133,7 @@ class UserDirectoryHandler(StateDeltasHandler): # If still None then the initial background update hasn't happened yet if self.pos is None: - defer.returnValue(None) + return None # 
Loop round handling deltas until we're up to date while True: diff --git a/synapse/http/client.py b/synapse/http/client.py index 45d5010952..0ac20ebefc 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -294,7 +294,7 @@ class SimpleHttpClient(object): logger.info( "Received response to %s %s: %s", method, redact_uri(uri), response.code ) - defer.returnValue(response) + return response except Exception as e: incoming_responses_counter.labels(method, "ERR").inc() logger.info( @@ -345,7 +345,7 @@ class SimpleHttpClient(object): body = yield make_deferred_yieldable(readBody(response)) if 200 <= response.code < 300: - defer.returnValue(json.loads(body)) + return json.loads(body) else: raise HttpResponseException(response.code, response.phrase, body) @@ -385,7 +385,7 @@ class SimpleHttpClient(object): body = yield make_deferred_yieldable(readBody(response)) if 200 <= response.code < 300: - defer.returnValue(json.loads(body)) + return json.loads(body) else: raise HttpResponseException(response.code, response.phrase, body) @@ -410,7 +410,7 @@ class SimpleHttpClient(object): ValueError: if the response was not JSON """ body = yield self.get_raw(uri, args, headers=headers) - defer.returnValue(json.loads(body)) + return json.loads(body) @defer.inlineCallbacks def put_json(self, uri, json_body, args={}, headers=None): @@ -453,7 +453,7 @@ class SimpleHttpClient(object): body = yield make_deferred_yieldable(readBody(response)) if 200 <= response.code < 300: - defer.returnValue(json.loads(body)) + return json.loads(body) else: raise HttpResponseException(response.code, response.phrase, body) @@ -488,7 +488,7 @@ class SimpleHttpClient(object): body = yield make_deferred_yieldable(readBody(response)) if 200 <= response.code < 300: - defer.returnValue(body) + return body else: raise HttpResponseException(response.code, response.phrase, body) @@ -545,13 +545,11 @@ class SimpleHttpClient(object): except Exception as e: raise_from(SynapseError(502, ("Failed to download remote body: %s" % e)), e) - defer.returnValue( - ( - length, - resp_headers, - response.request.absoluteURI.decode("ascii"), - response.code, - ) + return ( + length, + resp_headers, + response.request.absoluteURI.decode("ascii"), + response.code, ) @@ -627,10 +625,10 @@ class CaptchaServerHttpClient(SimpleHttpClient): try: body = yield make_deferred_yieldable(readBody(response)) - defer.returnValue(body) + return body except PartialDownloadError as e: # twisted dislikes google's response, no content length. 
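The PartialDownloadError handling above deserves a note: when a server omits Content-Length (as the recaptcha endpoint does here), twisted.web.client.readBody() can fail even though the complete body arrived, and the bytes received so far are available on the exception itself, which is what the change just below returns. A condensed sketch of the workaround:

    from twisted.internet import defer
    from twisted.web.client import PartialDownloadError, readBody

    @defer.inlineCallbacks
    def read_body_tolerantly(response):
        try:
            body = yield readBody(response)
        except PartialDownloadError as e:
            # e.response carries whatever bytes arrived before the error
            body = e.response
        return body
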
- defer.returnValue(e.response) + return e.response def encode_urlencode_args(args): diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 054c321a20..c03ddb724f 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -177,7 +177,7 @@ class MatrixFederationAgent(object): res = yield make_deferred_yieldable( agent.request(method, uri, headers, bodyProducer) ) - defer.returnValue(res) + return res @defer.inlineCallbacks def _route_matrix_uri(self, parsed_uri, lookup_well_known=True): @@ -205,24 +205,20 @@ class MatrixFederationAgent(object): port = parsed_uri.port if port == -1: port = 8448 - defer.returnValue( - _RoutingResult( - host_header=parsed_uri.netloc, - tls_server_name=parsed_uri.host, - target_host=parsed_uri.host, - target_port=port, - ) + return _RoutingResult( + host_header=parsed_uri.netloc, + tls_server_name=parsed_uri.host, + target_host=parsed_uri.host, + target_port=port, ) if parsed_uri.port != -1: # there is an explicit port - defer.returnValue( - _RoutingResult( - host_header=parsed_uri.netloc, - tls_server_name=parsed_uri.host, - target_host=parsed_uri.host, - target_port=parsed_uri.port, - ) + return _RoutingResult( + host_header=parsed_uri.netloc, + tls_server_name=parsed_uri.host, + target_host=parsed_uri.host, + target_port=parsed_uri.port, ) if lookup_well_known: @@ -259,7 +255,7 @@ class MatrixFederationAgent(object): ) res = yield self._route_matrix_uri(new_uri, lookup_well_known=False) - defer.returnValue(res) + return res # try a SRV lookup service_name = b"_matrix._tcp.%s" % (parsed_uri.host,) @@ -283,13 +279,11 @@ class MatrixFederationAgent(object): parsed_uri.host.decode("ascii"), ) - defer.returnValue( - _RoutingResult( - host_header=parsed_uri.netloc, - tls_server_name=parsed_uri.host, - target_host=target_host, - target_port=port, - ) + return _RoutingResult( + host_header=parsed_uri.netloc, + tls_server_name=parsed_uri.host, + target_host=target_host, + target_port=port, ) @defer.inlineCallbacks @@ -314,7 +308,7 @@ class MatrixFederationAgent(object): if cache_period > 0: self._well_known_cache.set(server_name, result, cache_period) - defer.returnValue(result) + return result @defer.inlineCallbacks def _do_get_well_known(self, server_name): @@ -354,7 +348,7 @@ class MatrixFederationAgent(object): # after startup cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER) - defer.returnValue((None, cache_period)) + return (None, cache_period) result = parsed_body["m.server"].encode("ascii") @@ -369,7 +363,7 @@ class MatrixFederationAgent(object): else: cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD) - defer.returnValue((result, cache_period)) + return (result, cache_period) @implementer(IStreamClientEndpoint) diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py index ecc88f9b96..b32188766d 100644 --- a/synapse/http/federation/srv_resolver.py +++ b/synapse/http/federation/srv_resolver.py @@ -120,7 +120,7 @@ class SrvResolver(object): if cache_entry: if all(s.expires > now for s in cache_entry): servers = list(cache_entry) - defer.returnValue(servers) + return servers try: answers, _, _ = yield make_deferred_yieldable( @@ -129,7 +129,7 @@ class SrvResolver(object): except DNSNameError: # TODO: cache this. We can get the SOA out of the exception, and use # the negative-TTL value. 
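The two except clauses in this SRV lookup encode a deliberate asymmetry: DNSNameError (NXDOMAIN) is an authoritative answer that no record exists, so the empty list just below is returned, while other DomainErrors (timeouts, server failures) are treated as transient and answered from the cache even if the entries have expired. A toy illustration of that policy, with resolve as a hypothetical lookup callable:

    from twisted.internet import defer
    from twisted.names.error import DNSNameError, DomainError

    @defer.inlineCallbacks
    def lookup_with_stale_fallback(resolve, cache, name):
        try:
            servers = yield resolve(name)
        except DNSNameError:
            return []  # authoritative "no such name": nothing to fall back to
        except DomainError:
            if name in cache:
                return cache[name]  # transient failure: serve stale entries
            raise
        cache[name] = servers
        return servers
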
- defer.returnValue([]) + return [] except DomainError as e: # We failed to resolve the name (other than a NameError) # Try something in the cache, else rereaise @@ -138,7 +138,7 @@ class SrvResolver(object): logger.warn( "Failed to resolve %r, falling back to cache. %r", service_name, e ) - defer.returnValue(list(cache_entry)) + return list(cache_entry) else: raise e @@ -169,4 +169,4 @@ class SrvResolver(object): ) self._cache[service_name] = list(servers) - defer.returnValue(servers) + return servers diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index e60334547e..d07d356464 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -158,7 +158,7 @@ def _handle_json_response(reactor, timeout_sec, request, response): response.code, response.phrase.decode("ascii", errors="replace"), ) - defer.returnValue(body) + return body class MatrixFederationHttpClient(object): @@ -256,7 +256,7 @@ class MatrixFederationHttpClient(object): response = yield self._send_request(request, **send_request_args) - defer.returnValue(response) + return response @defer.inlineCallbacks def _send_request( @@ -520,7 +520,7 @@ class MatrixFederationHttpClient(object): _flatten_response_never_received(e), ) raise - defer.returnValue(response) + return response def build_auth_headers( self, destination, method, url_bytes, content=None, destination_is=None @@ -644,7 +644,7 @@ class MatrixFederationHttpClient(object): self.reactor, self.default_timeout, request, response ) - defer.returnValue(body) + return body @defer.inlineCallbacks def post_json( @@ -713,7 +713,7 @@ class MatrixFederationHttpClient(object): body = yield _handle_json_response( self.reactor, _sec_timeout, request, response ) - defer.returnValue(body) + return body @defer.inlineCallbacks def get_json( @@ -778,7 +778,7 @@ class MatrixFederationHttpClient(object): self.reactor, self.default_timeout, request, response ) - defer.returnValue(body) + return body @defer.inlineCallbacks def delete_json( @@ -836,7 +836,7 @@ class MatrixFederationHttpClient(object): body = yield _handle_json_response( self.reactor, self.default_timeout, request, response ) - defer.returnValue(body) + return body @defer.inlineCallbacks def get_file( @@ -902,7 +902,7 @@ class MatrixFederationHttpClient(object): response.phrase.decode("ascii", errors="replace"), length, ) - defer.returnValue((length, headers)) + return (length, headers) class _ReadBodyToFileProtocol(protocol.Protocol): diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 96a4714d82..fb338ca223 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -89,7 +89,7 @@ the function becomes the operation name for the span. 
# We start yield we_wait # we finish - defer.returnValue(something_usual_and_useful) + return something_usual_and_useful Operation names can be explicitly set for functions by using ``trace_using_operation_name`` and @@ -113,7 +113,7 @@ Operation names can be explicitly set for functions by using # We start yield we_wait # we finish - defer.returnValue(something_usual_and_useful) + return something_usual_and_useful Contexts and carriers --------------------- @@ -694,7 +694,7 @@ def trace_servlet(servlet_name, func): }, ): result = yield defer.maybeDeferred(func, request, *args, **kwargs) - defer.returnValue(result) + return result return _trace_servlet_inner diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 7bb020cb45..41147d4292 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -101,7 +101,7 @@ class ModuleApi(object): ) user_id = yield self.register_user(localpart, displayname, emails) _, access_token = yield self.register_device(user_id) - defer.returnValue((user_id, access_token)) + return (user_id, access_token) def register_user(self, localpart, displayname=None, emails=[]): """Registers a new user with given localpart and optional displayname, emails. diff --git a/synapse/notifier.py b/synapse/notifier.py index 918ef64897..bd80c801b6 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -365,7 +365,7 @@ class Notifier(object): current_token = user_stream.current_token result = yield callback(prev_token, current_token) - defer.returnValue(result) + return result @defer.inlineCallbacks def get_events_for( @@ -400,7 +400,7 @@ class Notifier(object): @defer.inlineCallbacks def check_for_updates(before_token, after_token): if not after_token.is_after(before_token): - defer.returnValue(EventStreamResult([], (from_token, from_token))) + return EventStreamResult([], (from_token, from_token)) events = [] end_token = from_token @@ -440,7 +440,7 @@ class Notifier(object): events.extend(new_events) end_token = end_token.copy_and_replace(keyname, new_key) - defer.returnValue(EventStreamResult(events, (from_token, end_token))) + return EventStreamResult(events, (from_token, end_token)) user_id_for_stream = user.to_string() if is_peeking: @@ -465,18 +465,18 @@ class Notifier(object): from_token=from_token, ) - defer.returnValue(result) + return result @defer.inlineCallbacks def _get_room_ids(self, user, explicit_room_id): joined_room_ids = yield self.store.get_rooms_for_user(user.to_string()) if explicit_room_id: if explicit_room_id in joined_room_ids: - defer.returnValue(([explicit_room_id], True)) + return ([explicit_room_id], True) if (yield self._is_world_readable(explicit_room_id)): - defer.returnValue(([explicit_room_id], False)) + return ([explicit_room_id], False) raise AuthError(403, "Non-joined access not allowed") - defer.returnValue((joined_room_ids, True)) + return (joined_room_ids, True) @defer.inlineCallbacks def _is_world_readable(self, room_id): @@ -484,9 +484,9 @@ class Notifier(object): room_id, EventTypes.RoomHistoryVisibility, "" ) if state and "history_visibility" in state.content: - defer.returnValue(state.content["history_visibility"] == "world_readable") + return state.content["history_visibility"] == "world_readable" else: - defer.returnValue(False) + return False @log_function def remove_expired_streams(self): diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index c8a5b381da..c831975635 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ 
b/synapse/push/bulk_push_rule_evaluator.py @@ -95,7 +95,7 @@ class BulkPushRuleEvaluator(object): invited ) - defer.returnValue(rules_by_user) + return rules_by_user @cached() def _get_rules_for_room(self, room_id): @@ -134,7 +134,7 @@ class BulkPushRuleEvaluator(object): pl_event = auth_events.get(POWER_KEY) - defer.returnValue((pl_event.content if pl_event else {}, sender_level)) + return (pl_event.content if pl_event else {}, sender_level) @defer.inlineCallbacks def action_for_event_by_user(self, event, context): @@ -283,13 +283,13 @@ class RulesForRoom(object): if state_group and self.state_group == state_group: logger.debug("Using cached rules for %r", self.room_id) self.room_push_rule_cache_metrics.inc_hits() - defer.returnValue(self.rules_by_user) + return self.rules_by_user with (yield self.linearizer.queue(())): if state_group and self.state_group == state_group: logger.debug("Using cached rules for %r", self.room_id) self.room_push_rule_cache_metrics.inc_hits() - defer.returnValue(self.rules_by_user) + return self.rules_by_user self.room_push_rule_cache_metrics.inc_misses() @@ -366,7 +366,7 @@ class RulesForRoom(object): logger.debug( "Returning push rules for %r %r", self.room_id, ret_rules_by_user.keys() ) - defer.returnValue(ret_rules_by_user) + return ret_rules_by_user @defer.inlineCallbacks def _update_rules_with_member_event_ids( diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 4e7b6a5531..5b15b0dbe7 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -258,17 +258,17 @@ class HttpPusher(object): @defer.inlineCallbacks def _process_one(self, push_action): if "notify" not in push_action["actions"]: - defer.returnValue(True) + return True tweaks = push_rule_evaluator.tweaks_for_actions(push_action["actions"]) badge = yield push_tools.get_badge_count(self.hs.get_datastore(), self.user_id) event = yield self.store.get_event(push_action["event_id"], allow_none=True) if event is None: - defer.returnValue(True) # It's been redacted + return True # It's been redacted rejected = yield self.dispatch_push(event, tweaks, badge) if rejected is False: - defer.returnValue(False) + return False if isinstance(rejected, list) or isinstance(rejected, tuple): for pk in rejected: @@ -282,7 +282,7 @@ class HttpPusher(object): else: logger.info("Pushkey %s was rejected: removing", pk) yield self.hs.remove_pusher(self.app_id, pk, self.user_id) - defer.returnValue(True) + return True @defer.inlineCallbacks def _build_notification_dict(self, event, tweaks, badge): @@ -302,7 +302,7 @@ class HttpPusher(object): ], } } - defer.returnValue(d) + return d ctx = yield push_tools.get_context_for_event( self.store, self.state_handler, event, self.user_id @@ -345,13 +345,13 @@ class HttpPusher(object): if "name" in ctx and len(ctx["name"]) > 0: d["notification"]["room_name"] = ctx["name"] - defer.returnValue(d) + return d @defer.inlineCallbacks def dispatch_push(self, event, tweaks, badge): notification_dict = yield self._build_notification_dict(event, tweaks, badge) if not notification_dict: - defer.returnValue([]) + return [] try: resp = yield self.http_client.post_json_get_json( self.url, notification_dict @@ -364,11 +364,11 @@ class HttpPusher(object): type(e), e, ) - defer.returnValue(False) + return False rejected = [] if "rejected" in resp: rejected = resp["rejected"] - defer.returnValue(rejected) + return rejected @defer.inlineCallbacks def _send_badge(self, badge): diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 
521c6e2cd7..4245ce26f3 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -316,7 +316,7 @@ class Mailer(object): if not merge: room_vars["notifs"].append(notifvars) - defer.returnValue(room_vars) + return room_vars @defer.inlineCallbacks def get_notif_vars(self, notif, user_id, notif_event, room_state_ids): @@ -343,7 +343,7 @@ class Mailer(object): if messagevars is not None: ret["messages"].append(messagevars) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def get_message_vars(self, notif, event, room_state_ids): @@ -379,7 +379,7 @@ class Mailer(object): if "body" in event.content: ret["body_text_plain"] = event.content["body"] - defer.returnValue(ret) + return ret def add_text_message_vars(self, messagevars, event): msgformat = event.content.get("format") @@ -428,19 +428,16 @@ class Mailer(object): inviter_name = name_from_member_event(inviter_member_event) if room_name is None: - defer.returnValue( - INVITE_FROM_PERSON - % {"person": inviter_name, "app": self.app_name} - ) + return INVITE_FROM_PERSON % { + "person": inviter_name, + "app": self.app_name, + } else: - defer.returnValue( - INVITE_FROM_PERSON_TO_ROOM - % { - "person": inviter_name, - "room": room_name, - "app": self.app_name, - } - ) + return INVITE_FROM_PERSON_TO_ROOM % { + "person": inviter_name, + "room": room_name, + "app": self.app_name, + } sender_name = None if len(notifs_by_room[room_id]) == 1: @@ -454,26 +451,21 @@ class Mailer(object): sender_name = name_from_member_event(state_event) if sender_name is not None and room_name is not None: - defer.returnValue( - MESSAGE_FROM_PERSON_IN_ROOM - % { - "person": sender_name, - "room": room_name, - "app": self.app_name, - } - ) + return MESSAGE_FROM_PERSON_IN_ROOM % { + "person": sender_name, + "room": room_name, + "app": self.app_name, + } elif sender_name is not None: - defer.returnValue( - MESSAGE_FROM_PERSON - % {"person": sender_name, "app": self.app_name} - ) + return MESSAGE_FROM_PERSON % { + "person": sender_name, + "app": self.app_name, + } else: # There's more than one notification for this room, so just # say there are several if room_name is not None: - defer.returnValue( - MESSAGES_IN_ROOM % {"room": room_name, "app": self.app_name} - ) + return MESSAGES_IN_ROOM % {"room": room_name, "app": self.app_name} else: # If the room doesn't have a name, say who the messages # are from explicitly to avoid, "messages in the Bob room" @@ -493,24 +485,19 @@ class Mailer(object): ] ) - defer.returnValue( - MESSAGES_FROM_PERSON - % { - "person": descriptor_from_member_events( - member_events.values() - ), - "app": self.app_name, - } - ) + return MESSAGES_FROM_PERSON % { + "person": descriptor_from_member_events(member_events.values()), + "app": self.app_name, + } else: # Stuff's happened in multiple different rooms # ...but we still refer to the 'reason' room which triggered the mail if reason["room_name"] is not None: - defer.returnValue( - MESSAGES_IN_ROOM_AND_OTHERS - % {"room": reason["room_name"], "app": self.app_name} - ) + return MESSAGES_IN_ROOM_AND_OTHERS % { + "room": reason["room_name"], + "app": self.app_name, + } else: # If the reason room doesn't have a name, say who the messages # are from explicitly to avoid, "messages in the Bob room" @@ -527,13 +514,10 @@ class Mailer(object): [room_state_ids[room_id][("m.room.member", s)] for s in sender_ids] ) - defer.returnValue( - MESSAGES_FROM_PERSON_AND_OTHERS - % { - "person": descriptor_from_member_events(member_events.values()), - "app": self.app_name, - } - ) + return 
MESSAGES_FROM_PERSON_AND_OTHERS % {
+            "person": descriptor_from_member_events(member_events.values()),
+            "app": self.app_name,
+        }

     def make_room_link(self, room_id):
         if self.hs.config.email_riot_base_url:
diff --git a/synapse/push/presentable_names.py b/synapse/push/presentable_names.py
index 06056fbf4f..16a7e8e31d 100644
--- a/synapse/push/presentable_names.py
+++ b/synapse/push/presentable_names.py
@@ -55,7 +55,7 @@ def calculate_room_name(
             room_state_ids[("m.room.name", "")], allow_none=True
         )
         if m_room_name and m_room_name.content and m_room_name.content["name"]:
-            defer.returnValue(m_room_name.content["name"])
+            return m_room_name.content["name"]

     # does it have a canonical alias?
     if ("m.room.canonical_alias", "") in room_state_ids:
@@ -68,7 +68,7 @@ def calculate_room_name(
             and canon_alias.content["alias"]
             and _looks_like_an_alias(canon_alias.content["alias"])
         ):
-            defer.returnValue(canon_alias.content["alias"])
+            return canon_alias.content["alias"]

     # at this point we're going to need to search the state by all state keys
     # for an event type, so rearrange the data structure
@@ -82,10 +82,10 @@ def calculate_room_name(
         if alias_event and alias_event.content.get("aliases"):
             the_aliases = alias_event.content["aliases"]
             if len(the_aliases) > 0 and _looks_like_an_alias(the_aliases[0]):
-                defer.returnValue(the_aliases[0])
+                return the_aliases[0]

     if not fallback_to_members:
-        defer.returnValue(None)
+        return None

     my_member_event = None
     if ("m.room.member", user_id) in room_state_ids:
@@ -104,14 +104,13 @@ def calculate_room_name(
             )
             if inviter_member_event:
                 if fallback_to_single_member:
-                    defer.returnValue(
-                        "Invite from %s"
-                        % (name_from_member_event(inviter_member_event),)
+                    return "Invite from %s" % (
+                        name_from_member_event(inviter_member_event),
                     )
                 else:
                     return
         else:
-            defer.returnValue("Room Invite")
+            return "Room Invite"

     # we're going to have to generate a name based on who's in the room,
     # so find out who is in the room that isn't the user.
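Every hunk in this patch makes the same mechanical substitution: inside a generator decorated with ``@defer.inlineCallbacks``, ``defer.returnValue(x)`` becomes a plain ``return x``. A minimal before/after sketch of the idiom (illustrative only, not taken from the patch; ``store.get_profile`` is a hypothetical Deferred-returning call):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def fetch_displayname_old(store, user_id):
        # Old idiom: a Python 2 generator cannot return a value, so Twisted
        # provides defer.returnValue(), which smuggles it out via an exception.
        profile = yield store.get_profile(user_id)
        defer.returnValue(profile["displayname"])

    @defer.inlineCallbacks
    def fetch_displayname_new(store, user_id):
        # New idiom (Python 3.3+): the generator returns the value directly;
        # inlineCallbacks fires the resulting Deferred with it.
        profile = yield store.get_profile(user_id)
        return profile["displayname"]

Both spellings resolve the Deferred returned by the decorated function with the same value, so each hunk is behaviour-preserving.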
@@ -154,17 +153,17 @@ def calculate_room_name( # return "Inviting %s" % ( # descriptor_from_member_events(third_party_invites) # ) - defer.returnValue("Inviting email address") + return "Inviting email address" else: - defer.returnValue(ALL_ALONE) + return ALL_ALONE else: - defer.returnValue(name_from_member_event(all_members[0])) + return name_from_member_event(all_members[0]) else: - defer.returnValue(ALL_ALONE) + return ALL_ALONE elif len(other_members) == 1 and not fallback_to_single_member: return else: - defer.returnValue(descriptor_from_member_events(other_members)) + return descriptor_from_member_events(other_members) def descriptor_from_member_events(member_events): diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py index e37269cdb9..a54051a726 100644 --- a/synapse/push/push_tools.py +++ b/synapse/push/push_tools.py @@ -39,7 +39,7 @@ def get_badge_count(store, user_id): # return one badge count per conversation, as count per # message is so noisy as to be almost useless badge += 1 if notifs["notify_count"] else 0 - defer.returnValue(badge) + return badge @defer.inlineCallbacks @@ -61,4 +61,4 @@ def get_context_for_event(store, state_handler, ev, user_id): sender_state_event = yield store.get_event(sender_state_event_id) ctx["sender_display_name"] = name_from_member_event(sender_state_event) - defer.returnValue(ctx) + return ctx diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index df6f670740..08e840fdc2 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -123,7 +123,7 @@ class PusherPool: ) pusher = yield self.start_pusher_by_id(app_id, pushkey, user_id) - defer.returnValue(pusher) + return pusher @defer.inlineCallbacks def remove_pushers_by_app_id_and_pushkey_not_user( @@ -224,7 +224,7 @@ class PusherPool: if pusher_dict: pusher = yield self._start_pusher(pusher_dict) - defer.returnValue(pusher) + return pusher @defer.inlineCallbacks def _start_pushers(self): @@ -293,7 +293,7 @@ class PusherPool: p.on_started(have_notifs) - defer.returnValue(p) + return p @defer.inlineCallbacks def remove_pusher(self, app_id, pushkey, user_id): diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index fe482e279f..f5074b101a 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -185,7 +185,7 @@ class ReplicationEndpoint(object): except RequestSendFailed as e: raise_from(SynapseError(502, "Failed to talk to master"), e) - defer.returnValue(result) + return result return send_request diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py index 61eafbe708..fed4f08820 100644 --- a/synapse/replication/http/federation.py +++ b/synapse/replication/http/federation.py @@ -80,7 +80,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint): payload = {"events": event_payloads, "backfilled": backfilled} - defer.returnValue(payload) + return payload @defer.inlineCallbacks def _handle_request(self, request): @@ -113,7 +113,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint): event_and_contexts, backfilled ) - defer.returnValue((200, {})) + return (200, {}) class ReplicationFederationSendEduRestServlet(ReplicationEndpoint): @@ -156,7 +156,7 @@ class ReplicationFederationSendEduRestServlet(ReplicationEndpoint): result = yield self.registry.on_edu(edu_type, origin, edu_content) - defer.returnValue((200, result)) + return (200, result) class ReplicationGetQueryRestServlet(ReplicationEndpoint): @@ -204,7 +204,7 
@@ class ReplicationGetQueryRestServlet(ReplicationEndpoint): result = yield self.registry.on_query(query_type, args) - defer.returnValue((200, result)) + return (200, result) class ReplicationCleanRoomRestServlet(ReplicationEndpoint): @@ -238,7 +238,7 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint): def _handle_request(self, request, room_id): yield self.store.clean_room_for_join(room_id) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/replication/http/login.py b/synapse/replication/http/login.py index 7c1197e5dd..f17d3a2da4 100644 --- a/synapse/replication/http/login.py +++ b/synapse/replication/http/login.py @@ -64,7 +64,7 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint): user_id, device_id, initial_display_name, is_guest ) - defer.returnValue((200, {"device_id": device_id, "access_token": access_token})) + return (200, {"device_id": device_id, "access_token": access_token}) def register_servlets(hs, http_server): diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index 2d9cbbaefc..4217335d88 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -83,7 +83,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint): remote_room_hosts, room_id, user_id, event_content ) - defer.returnValue((200, {})) + return (200, {}) class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint): @@ -153,7 +153,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint): yield self.store.locally_reject_invite(user_id, room_id) ret = {} - defer.returnValue((200, ret)) + return (200, ret) class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint): diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py index 2bf2173895..3341320a87 100644 --- a/synapse/replication/http/register.py +++ b/synapse/replication/http/register.py @@ -90,7 +90,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint): address=content["address"], ) - defer.returnValue((200, {})) + return (200, {}) class ReplicationPostRegisterActionsServlet(ReplicationEndpoint): @@ -143,7 +143,7 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint): bind_msisdn=bind_msisdn, ) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py index 034763fe99..eff7bd7305 100644 --- a/synapse/replication/http/send_event.py +++ b/synapse/replication/http/send_event.py @@ -85,7 +85,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint): "extra_users": [u.to_string() for u in extra_users], } - defer.returnValue(payload) + return payload @defer.inlineCallbacks def _handle_request(self, request, event_id): @@ -117,7 +117,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint): requester, event, context, ratelimit=ratelimit, extra_users=extra_users ) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index 7ef67a5a73..c10b85d2ff 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -158,7 +158,7 @@ class Stream(object): updates, current_token = yield self.get_updates_since(self.last_token) self.last_token = current_token - defer.returnValue((updates, current_token)) + return (updates, 
current_token) @defer.inlineCallbacks def get_updates_since(self, from_token): @@ -172,14 +172,14 @@ class Stream(object): sent over the replication steam. """ if from_token in ("NOW", "now"): - defer.returnValue(([], self.upto_token)) + return ([], self.upto_token) current_token = self.upto_token from_token = int(from_token) if from_token == current_token: - defer.returnValue(([], current_token)) + return ([], current_token) if self._LIMITED: rows = yield self.update_function( @@ -198,7 +198,7 @@ class Stream(object): if self._LIMITED and len(updates) >= MAX_EVENTS_BEHIND: raise Exception("stream %s has fallen behind" % (self.NAME)) - defer.returnValue((updates, current_token)) + return (updates, current_token) def current_token(self): """Gets the current token of the underlying streams. Should be provided @@ -297,7 +297,7 @@ class PushRulesStream(Stream): @defer.inlineCallbacks def update_function(self, from_token, to_token, limit): rows = yield self.store.get_all_push_rule_updates(from_token, to_token, limit) - defer.returnValue([(row[0], row[2]) for row in rows]) + return [(row[0], row[2]) for row in rows] class PushersStream(Stream): @@ -424,7 +424,7 @@ class AccountDataStream(Stream): for stream_id, user_id, account_data_type, content in global_results ) - defer.returnValue(results) + return results class GroupServerStream(Stream): diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index 3d0694bb11..d97669c886 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py @@ -134,7 +134,7 @@ class EventsStream(Stream): all_updates = heapq.merge(event_updates, state_updates) - defer.returnValue(all_updates) + return all_updates @classmethod def parse_row(cls, row): diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 6888ae5590..0a7d9b81b2 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -84,7 +84,7 @@ class UsersRestServlet(RestServlet): ret = yield self.handlers.admin_handler.get_users() - defer.returnValue((200, ret)) + return (200, ret) class VersionServlet(RestServlet): @@ -227,7 +227,7 @@ class UserRegisterServlet(RestServlet): ) result = yield register._create_registration_details(user_id, body) - defer.returnValue((200, result)) + return (200, result) class WhoisRestServlet(RestServlet): @@ -252,7 +252,7 @@ class WhoisRestServlet(RestServlet): ret = yield self.handlers.admin_handler.get_whois(target_user) - defer.returnValue((200, ret)) + return (200, ret) class PurgeMediaCacheRestServlet(RestServlet): @@ -271,7 +271,7 @@ class PurgeMediaCacheRestServlet(RestServlet): ret = yield self.media_repository.delete_old_remote_media(before_ts) - defer.returnValue((200, ret)) + return (200, ret) class PurgeHistoryRestServlet(RestServlet): @@ -356,7 +356,7 @@ class PurgeHistoryRestServlet(RestServlet): room_id, token, delete_local_events=delete_local_events ) - defer.returnValue((200, {"purge_id": purge_id})) + return (200, {"purge_id": purge_id}) class PurgeHistoryStatusRestServlet(RestServlet): @@ -381,7 +381,7 @@ class PurgeHistoryStatusRestServlet(RestServlet): if purge_status is None: raise NotFoundError("purge id '%s' not found" % purge_id) - defer.returnValue((200, purge_status.asdict())) + return (200, purge_status.asdict()) class DeactivateAccountRestServlet(RestServlet): @@ -413,7 +413,7 @@ class DeactivateAccountRestServlet(RestServlet): else: id_server_unbind_result = "no-support" - defer.returnValue((200, 
{"id_server_unbind_result": id_server_unbind_result})) + return (200, {"id_server_unbind_result": id_server_unbind_result}) class ShutdownRoomRestServlet(RestServlet): @@ -531,16 +531,14 @@ class ShutdownRoomRestServlet(RestServlet): room_id, new_room_id, requester_user_id ) - defer.returnValue( - ( - 200, - { - "kicked_users": kicked_users, - "failed_to_kick_users": failed_to_kick_users, - "local_aliases": aliases_for_room, - "new_room_id": new_room_id, - }, - ) + return ( + 200, + { + "kicked_users": kicked_users, + "failed_to_kick_users": failed_to_kick_users, + "local_aliases": aliases_for_room, + "new_room_id": new_room_id, + }, ) @@ -564,7 +562,7 @@ class QuarantineMediaInRoom(RestServlet): room_id, requester.user.to_string() ) - defer.returnValue((200, {"num_quarantined": num_quarantined})) + return (200, {"num_quarantined": num_quarantined}) class ListMediaInRoom(RestServlet): @@ -585,7 +583,7 @@ class ListMediaInRoom(RestServlet): local_mxcs, remote_mxcs = yield self.store.get_media_mxcs_in_room(room_id) - defer.returnValue((200, {"local": local_mxcs, "remote": remote_mxcs})) + return (200, {"local": local_mxcs, "remote": remote_mxcs}) class ResetPasswordRestServlet(RestServlet): @@ -629,7 +627,7 @@ class ResetPasswordRestServlet(RestServlet): yield self._set_password_handler.set_password( target_user_id, new_password, requester ) - defer.returnValue((200, {})) + return (200, {}) class GetUsersPaginatedRestServlet(RestServlet): @@ -671,7 +669,7 @@ class GetUsersPaginatedRestServlet(RestServlet): logger.info("limit: %s, start: %s", limit, start) ret = yield self.handlers.admin_handler.get_users_paginate(order, start, limit) - defer.returnValue((200, ret)) + return (200, ret) @defer.inlineCallbacks def on_POST(self, request, target_user_id): @@ -699,7 +697,7 @@ class GetUsersPaginatedRestServlet(RestServlet): logger.info("limit: %s, start: %s", limit, start) ret = yield self.handlers.admin_handler.get_users_paginate(order, start, limit) - defer.returnValue((200, ret)) + return (200, ret) class SearchUsersRestServlet(RestServlet): @@ -742,7 +740,7 @@ class SearchUsersRestServlet(RestServlet): logger.info("term: %s ", term) ret = yield self.handlers.admin_handler.search_users(term) - defer.returnValue((200, ret)) + return (200, ret) class DeleteGroupAdminRestServlet(RestServlet): @@ -765,7 +763,7 @@ class DeleteGroupAdminRestServlet(RestServlet): raise SynapseError(400, "Can only delete local groups") yield self.group_server.delete_group(group_id, requester.user.to_string()) - defer.returnValue((200, {})) + return (200, {}) class AccountValidityRenewServlet(RestServlet): @@ -796,7 +794,7 @@ class AccountValidityRenewServlet(RestServlet): ) res = {"expiration_ts": expiration_ts} - defer.returnValue((200, res)) + return (200, res) ######################################################################################## diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py index ee66838a0d..90c0ee15dc 100644 --- a/synapse/rest/admin/server_notice_servlet.py +++ b/synapse/rest/admin/server_notice_servlet.py @@ -87,7 +87,7 @@ class SendServerNoticeServlet(RestServlet): event_content=body["content"], ) - defer.returnValue((200, {"event_id": event.event_id})) + return (200, {"event_id": event.event_id}) def on_PUT(self, request, txn_id): return self.txns.fetch_or_execute_request( diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index 57542c2b4b..4284738021 100644 --- a/synapse/rest/client/v1/directory.py 
+++ b/synapse/rest/client/v1/directory.py @@ -54,7 +54,7 @@ class ClientDirectoryServer(RestServlet): dir_handler = self.handlers.directory_handler res = yield dir_handler.get_association(room_alias) - defer.returnValue((200, res)) + return (200, res) @defer.inlineCallbacks def on_PUT(self, request, room_alias): @@ -87,7 +87,7 @@ class ClientDirectoryServer(RestServlet): requester, room_alias, room_id, servers ) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_DELETE(self, request, room_alias): @@ -102,7 +102,7 @@ class ClientDirectoryServer(RestServlet): service.url, room_alias.to_string(), ) - defer.returnValue((200, {})) + return (200, {}) except InvalidClientCredentialsError: # fallback to default user behaviour if they aren't an AS pass @@ -118,7 +118,7 @@ class ClientDirectoryServer(RestServlet): "User %s deleted alias %s", user.to_string(), room_alias.to_string() ) - defer.returnValue((200, {})) + return (200, {}) class ClientDirectoryListServer(RestServlet): @@ -136,9 +136,7 @@ class ClientDirectoryListServer(RestServlet): if room is None: raise NotFoundError("Unknown room") - defer.returnValue( - (200, {"visibility": "public" if room["is_public"] else "private"}) - ) + return (200, {"visibility": "public" if room["is_public"] else "private"}) @defer.inlineCallbacks def on_PUT(self, request, room_id): @@ -151,7 +149,7 @@ class ClientDirectoryListServer(RestServlet): requester, room_id, visibility ) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_DELETE(self, request, room_id): @@ -161,7 +159,7 @@ class ClientDirectoryListServer(RestServlet): requester, room_id, "private" ) - defer.returnValue((200, {})) + return (200, {}) class ClientAppserviceDirectoryListServer(RestServlet): @@ -195,4 +193,4 @@ class ClientAppserviceDirectoryListServer(RestServlet): requester.app_service.id, network_id, room_id, visibility ) - defer.returnValue((200, {})) + return (200, {}) diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index d6de2b7360..53ebed2203 100644 --- a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -67,7 +67,7 @@ class EventStreamRestServlet(RestServlet): is_guest=is_guest, ) - defer.returnValue((200, chunk)) + return (200, chunk) def on_OPTIONS(self, request): return (200, {}) @@ -91,9 +91,9 @@ class EventRestServlet(RestServlet): time_now = self.clock.time_msec() if event: event = yield self._event_serializer.serialize_event(event, time_now) - defer.returnValue((200, event)) + return (200, event) else: - defer.returnValue((404, "Event not found.")) + return (404, "Event not found.") def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py index 0fe5f2d79b..70b8478e90 100644 --- a/synapse/rest/client/v1/initial_sync.py +++ b/synapse/rest/client/v1/initial_sync.py @@ -42,7 +42,7 @@ class InitialSyncRestServlet(RestServlet): include_archived=include_archived, ) - defer.returnValue((200, content)) + return (200, content) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 0d05945f0a..5762b9fd06 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -152,7 +152,7 @@ class LoginRestServlet(RestServlet): well_known_data = self._well_known_builder.get_well_known() if well_known_data: result["well_known"] = well_known_data - defer.returnValue((200, result)) + return (200, result) 
@defer.inlineCallbacks def _do_other_login(self, login_submission): @@ -212,7 +212,7 @@ class LoginRestServlet(RestServlet): result = yield self._register_device_with_callback( canonical_user_id, login_submission, callback_3pid ) - defer.returnValue(result) + return result # No password providers were able to handle this 3pid # Check local store @@ -241,7 +241,7 @@ class LoginRestServlet(RestServlet): result = yield self._register_device_with_callback( canonical_user_id, login_submission, callback ) - defer.returnValue(result) + return result @defer.inlineCallbacks def _register_device_with_callback(self, user_id, login_submission, callback=None): @@ -273,7 +273,7 @@ class LoginRestServlet(RestServlet): if callback is not None: yield callback(result) - defer.returnValue(result) + return result @defer.inlineCallbacks def do_token_login(self, login_submission): @@ -284,7 +284,7 @@ class LoginRestServlet(RestServlet): ) result = yield self._register_device_with_callback(user_id, login_submission) - defer.returnValue(result) + return result @defer.inlineCallbacks def do_jwt_login(self, login_submission): @@ -321,7 +321,7 @@ class LoginRestServlet(RestServlet): result = yield self._register_device_with_callback( registered_user_id, login_submission ) - defer.returnValue(result) + return result class BaseSSORedirectServlet(RestServlet): @@ -395,7 +395,7 @@ class CasTicketServlet(RestServlet): # even if that's being used old-http style to signal end-of-data body = pde.response result = yield self.handle_cas_response(request, body, client_redirect_url) - defer.returnValue(result) + return result def handle_cas_response(self, request, cas_response_body, client_redirect_url): user, attributes = self.parse_cas_response(cas_response_body) diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index cd711be519..2769f3a189 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -49,7 +49,7 @@ class LogoutRestServlet(RestServlet): requester.user.to_string(), requester.device_id ) - defer.returnValue((200, {})) + return (200, {}) class LogoutAllRestServlet(RestServlet): @@ -75,7 +75,7 @@ class LogoutAllRestServlet(RestServlet): # .. and then delete any access tokens which weren't associated with # devices. 
yield self._auth_handler.delete_access_tokens_for_user(user_id)
-        defer.returnValue((200, {}))
+        return (200, {})


 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py
index 3e87f0fdb3..1eb1068c98 100644
--- a/synapse/rest/client/v1/presence.py
+++ b/synapse/rest/client/v1/presence.py
@@ -56,7 +56,7 @@ class PresenceStatusRestServlet(RestServlet):
         state = yield self.presence_handler.get_state(target_user=user)
         state = format_user_presence_state(state, self.clock.time_msec())

-        defer.returnValue((200, state))
+        return (200, state)

     @defer.inlineCallbacks
     def on_PUT(self, request, user_id):
@@ -88,7 +88,7 @@ class PresenceStatusRestServlet(RestServlet):
         if self.hs.config.use_presence:
             yield self.presence_handler.set_state(user, state)

-        defer.returnValue((200, {}))
+        return (200, {})

     def on_OPTIONS(self, request):
         return (200, {})
diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py
index 4d8ab1f47e..2657ae45bb 100644
--- a/synapse/rest/client/v1/profile.py
+++ b/synapse/rest/client/v1/profile.py
@@ -48,7 +48,7 @@ class ProfileDisplaynameRestServlet(RestServlet):
         if displayname is not None:
             ret["displayname"] = displayname

-        defer.returnValue((200, ret))
+        return (200, ret)

     @defer.inlineCallbacks
     def on_PUT(self, request, user_id):
@@ -61,11 +61,11 @@ class ProfileDisplaynameRestServlet(RestServlet):
         try:
             new_name = content["displayname"]
         except Exception:
-            defer.returnValue((400, "Unable to parse name"))
+            return (400, "Unable to parse name")

         yield self.profile_handler.set_displayname(user, requester, new_name, is_admin)

-        defer.returnValue((200, {}))
+        return (200, {})

     def on_OPTIONS(self, request, user_id):
         return (200, {})
@@ -98,7 +98,7 @@ class ProfileAvatarURLRestServlet(RestServlet):
         if avatar_url is not None:
             ret["avatar_url"] = avatar_url

-        defer.returnValue((200, ret))
+        return (200, ret)

     @defer.inlineCallbacks
     def on_PUT(self, request, user_id):
@@ -110,11 +110,11 @@ class ProfileAvatarURLRestServlet(RestServlet):
         try:
             new_name = content["avatar_url"]
         except Exception:
-            defer.returnValue((400, "Unable to parse name"))
+            return (400, "Unable to parse name")

         yield self.profile_handler.set_avatar_url(user, requester, new_name, is_admin)

-        defer.returnValue((200, {}))
+        return (200, {})

     def on_OPTIONS(self, request, user_id):
         return (200, {})
@@ -150,7 +150,7 @@ class ProfileRestServlet(RestServlet):
         if avatar_url is not None:
             ret["avatar_url"] = avatar_url

-        defer.returnValue((200, ret))
+        return (200, ret)


 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py
index e635efb420..c3ae8b98a8 100644
--- a/synapse/rest/client/v1/push_rule.py
+++ b/synapse/rest/client/v1/push_rule.py
@@ -69,7 +69,7 @@ class PushRuleRestServlet(RestServlet):
         if "attr" in spec:
             yield self.set_rule_attr(user_id, spec, content)
             self.notify_user(user_id)
-            defer.returnValue((200, {}))
+            return (200, {})

         if spec["rule_id"].startswith("."):
             # Rule ids starting with '.' are reserved for server default rules.
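The substitution is safe because of a language feature rather than anything Twisted-specific: since Python 3.3 (PEP 380), ``return value`` inside a generator raises ``StopIteration(value)``, and ``inlineCallbacks`` reads the value off that exception just as it does for the internal exception raised by ``defer.returnValue``. A Twisted-free sketch of the mechanism (``gen`` is hypothetical):

    def gen():
        yield 1
        return "done"  # terminates the generator with StopIteration("done")

    g = gen()
    assert next(g) == 1
    try:
        next(g)
    except StopIteration as e:
        assert e.value == "done"  # the returned value rides on the exception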
@@ -106,7 +106,7 @@ class PushRuleRestServlet(RestServlet): except RuleNotFoundException as e: raise SynapseError(400, str(e)) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_DELETE(self, request, path): @@ -123,7 +123,7 @@ class PushRuleRestServlet(RestServlet): try: yield self.store.delete_push_rule(user_id, namespaced_rule_id) self.notify_user(user_id) - defer.returnValue((200, {})) + return (200, {}) except StoreError as e: if e.code == 404: raise NotFoundError() @@ -151,10 +151,10 @@ class PushRuleRestServlet(RestServlet): ) if path[0] == "": - defer.returnValue((200, rules)) + return (200, rules) elif path[0] == "global": result = _filter_ruleset_with_path(rules["global"], path[1:]) - defer.returnValue((200, result)) + return (200, result) else: raise UnrecognizedRequestError() diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index e9246018df..ebc3dec516 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -62,7 +62,7 @@ class PushersRestServlet(RestServlet): if k not in allowed_keys: del p[k] - defer.returnValue((200, {"pushers": pushers})) + return (200, {"pushers": pushers}) def on_OPTIONS(self, _): return 200, {} @@ -94,7 +94,7 @@ class PushersSetRestServlet(RestServlet): yield self.pusher_pool.remove_pusher( content["app_id"], content["pushkey"], user_id=user.to_string() ) - defer.returnValue((200, {})) + return (200, {}) assert_params_in_dict( content, @@ -143,7 +143,7 @@ class PushersSetRestServlet(RestServlet): self.notifier.on_new_replication_data() - defer.returnValue((200, {})) + return (200, {}) def on_OPTIONS(self, _): return 200, {} @@ -190,7 +190,7 @@ class PushersRemoveRestServlet(RestServlet): ) request.write(PushersRemoveRestServlet.SUCCESS_HTML) finish_request(request) - defer.returnValue(None) + return None def on_OPTIONS(self, _): return 200, {} diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 7709c2d705..012e7a44a6 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -85,7 +85,7 @@ class RoomCreateRestServlet(TransactionRestServlet): requester, self.get_room_config(request) ) - defer.returnValue((200, info)) + return (200, info) def get_room_config(self, request): user_supplied_config = parse_json_object_from_request(request) @@ -155,9 +155,9 @@ class RoomStateEventRestServlet(TransactionRestServlet): if format == "event": event = format_event_for_client_v2(data.get_dict()) - defer.returnValue((200, event)) + return (200, event) elif format == "content": - defer.returnValue((200, data.get_dict()["content"])) + return (200, data.get_dict()["content"]) @defer.inlineCallbacks def on_PUT(self, request, room_id, event_type, state_key, txn_id=None): @@ -192,7 +192,7 @@ class RoomStateEventRestServlet(TransactionRestServlet): ret = {} if event: ret = {"event_id": event.event_id} - defer.returnValue((200, ret)) + return (200, ret) # TODO: Needs unit testing for generic events + feedback @@ -226,7 +226,7 @@ class RoomSendEventRestServlet(TransactionRestServlet): requester, event_dict, txn_id=txn_id ) - defer.returnValue((200, {"event_id": event.event_id})) + return (200, {"event_id": event.event_id}) def on_GET(self, request, room_id, event_type, txn_id): return (200, "Not implemented") @@ -289,7 +289,7 @@ class JoinRoomAliasServlet(TransactionRestServlet): third_party_signed=content.get("third_party_signed", None), ) - defer.returnValue((200, {"room_id": room_id})) + return (200, {"room_id": room_id}) def 
on_PUT(self, request, room_identifier, txn_id): return self.txns.fetch_or_execute_request( @@ -342,7 +342,7 @@ class PublicRoomListRestServlet(TransactionRestServlet): limit=limit, since_token=since_token ) - defer.returnValue((200, data)) + return (200, data) @defer.inlineCallbacks def on_POST(self, request): @@ -387,7 +387,7 @@ class PublicRoomListRestServlet(TransactionRestServlet): network_tuple=network_tuple, ) - defer.returnValue((200, data)) + return (200, data) # TODO: Needs unit testing @@ -438,7 +438,7 @@ class RoomMemberListRestServlet(RestServlet): continue chunk.append(event) - defer.returnValue((200, {"chunk": chunk})) + return (200, {"chunk": chunk}) # deprecated in favour of /members?membership=join? @@ -459,7 +459,7 @@ class JoinedRoomMemberListRestServlet(RestServlet): requester, room_id ) - defer.returnValue((200, {"joined": users_with_profile})) + return (200, {"joined": users_with_profile}) # TODO: Needs better unit testing @@ -492,7 +492,7 @@ class RoomMessageListRestServlet(RestServlet): event_filter=event_filter, ) - defer.returnValue((200, msgs)) + return (200, msgs) # TODO: Needs unit testing @@ -513,7 +513,7 @@ class RoomStateRestServlet(RestServlet): user_id=requester.user.to_string(), is_guest=requester.is_guest, ) - defer.returnValue((200, events)) + return (200, events) # TODO: Needs unit testing @@ -532,7 +532,7 @@ class RoomInitialSyncRestServlet(RestServlet): content = yield self.initial_sync_handler.room_initial_sync( room_id=room_id, requester=requester, pagin_config=pagination_config ) - defer.returnValue((200, content)) + return (200, content) class RoomEventServlet(RestServlet): @@ -555,9 +555,9 @@ class RoomEventServlet(RestServlet): time_now = self.clock.time_msec() if event: event = yield self._event_serializer.serialize_event(event, time_now) - defer.returnValue((200, event)) + return (200, event) else: - defer.returnValue((404, "Event not found.")) + return (404, "Event not found.") class RoomEventContextServlet(RestServlet): @@ -607,7 +607,7 @@ class RoomEventContextServlet(RestServlet): results["state"], time_now ) - defer.returnValue((200, results)) + return (200, results) class RoomForgetRestServlet(TransactionRestServlet): @@ -626,7 +626,7 @@ class RoomForgetRestServlet(TransactionRestServlet): yield self.room_member_handler.forget(user=requester.user, room_id=room_id) - defer.returnValue((200, {})) + return (200, {}) def on_PUT(self, request, room_id, txn_id): return self.txns.fetch_or_execute_request( @@ -676,7 +676,7 @@ class RoomMembershipRestServlet(TransactionRestServlet): requester, txn_id, ) - defer.returnValue((200, {})) + return (200, {}) return target = requester.user @@ -703,7 +703,7 @@ class RoomMembershipRestServlet(TransactionRestServlet): if membership_action == "join": return_value["room_id"] = room_id - defer.returnValue((200, return_value)) + return (200, return_value) def _has_3pid_invite_keys(self, content): for key in {"id_server", "medium", "address"}: @@ -745,7 +745,7 @@ class RoomRedactEventRestServlet(TransactionRestServlet): txn_id=txn_id, ) - defer.returnValue((200, {"event_id": event.event_id})) + return (200, {"event_id": event.event_id}) def on_PUT(self, request, room_id, event_id, txn_id): return self.txns.fetch_or_execute_request( @@ -790,7 +790,7 @@ class RoomTypingRestServlet(RestServlet): target_user=target_user, auth_user=requester.user, room_id=room_id ) - defer.returnValue((200, {})) + return (200, {}) class SearchRestServlet(RestServlet): @@ -812,7 +812,7 @@ class SearchRestServlet(RestServlet): 
requester.user, content, batch ) - defer.returnValue((200, results)) + return (200, results) class JoinedRoomsRestServlet(RestServlet): @@ -828,7 +828,7 @@ class JoinedRoomsRestServlet(RestServlet): requester = yield self.auth.get_user_by_req(request, allow_guest=True) room_ids = yield self.store.get_rooms_for_user(requester.user.to_string()) - defer.returnValue((200, {"joined_rooms": list(room_ids)})) + return (200, {"joined_rooms": list(room_ids)}) def register_txn_path(servlet, regex_string, http_server, with_get=False): diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py index 41b3171ac8..497cddf8b8 100644 --- a/synapse/rest/client/v1/voip.py +++ b/synapse/rest/client/v1/voip.py @@ -60,18 +60,16 @@ class VoipRestServlet(RestServlet): password = turnPassword else: - defer.returnValue((200, {})) - - defer.returnValue( - ( - 200, - { - "username": username, - "password": password, - "ttl": userLifetime / 1000, - "uris": turnUris, - }, - ) + return (200, {}) + + return ( + 200, + { + "username": username, + "password": password, + "ttl": userLifetime / 1000, + "uris": turnUris, + }, ) def on_OPTIONS(self, request): diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index f143d8b85c..7ac456812a 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -117,7 +117,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): # Wrap the session id in a JSON object ret = {"sid": sid} - defer.returnValue((200, ret)) + return (200, ret) @defer.inlineCallbacks def send_password_reset(self, email, client_secret, send_attempt, next_link=None): @@ -149,7 +149,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): # Check that the send_attempt is higher than previous attempts if send_attempt <= last_send_attempt: # If not, just return a success without sending an email - defer.returnValue(session_id) + return session_id else: # An non-validated session does not exist yet. 
# Generate a session id @@ -185,7 +185,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): token_expires, ) - defer.returnValue(session_id) + return session_id class MsisdnPasswordRequestTokenRestServlet(RestServlet): @@ -221,7 +221,7 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet): raise SynapseError(400, "MSISDN not found", Codes.THREEPID_NOT_FOUND) ret = yield self.identity_handler.requestMsisdnToken(**body) - defer.returnValue((200, ret)) + return (200, ret) class PasswordResetSubmitTokenServlet(RestServlet): @@ -279,7 +279,7 @@ class PasswordResetSubmitTokenServlet(RestServlet): request.setResponseCode(302) request.setHeader("Location", next_link) finish_request(request) - defer.returnValue(None) + return None # Otherwise show the success template html = self.config.email_password_reset_success_html_content @@ -295,7 +295,7 @@ class PasswordResetSubmitTokenServlet(RestServlet): request.write(html.encode("utf-8")) finish_request(request) - defer.returnValue(None) + return None def load_jinja2_template(self, template_dir, template_filename, template_vars): """Loads a jinja2 template with variables to insert @@ -330,7 +330,7 @@ class PasswordResetSubmitTokenServlet(RestServlet): ) response_code = 200 if valid else 400 - defer.returnValue((response_code, {"success": valid})) + return (response_code, {"success": valid}) class PasswordRestServlet(RestServlet): @@ -399,7 +399,7 @@ class PasswordRestServlet(RestServlet): yield self._set_password_handler.set_password(user_id, new_password, requester) - defer.returnValue((200, {})) + return (200, {}) def on_OPTIONS(self, _): return 200, {} @@ -434,7 +434,7 @@ class DeactivateAccountRestServlet(RestServlet): yield self._deactivate_account_handler.deactivate_account( requester.user.to_string(), erase ) - defer.returnValue((200, {})) + return (200, {}) yield self.auth_handler.validate_user_via_ui_auth( requester, body, self.hs.get_ip_from_request(request) @@ -447,7 +447,7 @@ class DeactivateAccountRestServlet(RestServlet): else: id_server_unbind_result = "no-support" - defer.returnValue((200, {"id_server_unbind_result": id_server_unbind_result})) + return (200, {"id_server_unbind_result": id_server_unbind_result}) class EmailThreepidRequestTokenRestServlet(RestServlet): @@ -481,7 +481,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) ret = yield self.identity_handler.requestEmailToken(**body) - defer.returnValue((200, ret)) + return (200, ret) class MsisdnThreepidRequestTokenRestServlet(RestServlet): @@ -516,7 +516,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE) ret = yield self.identity_handler.requestMsisdnToken(**body) - defer.returnValue((200, ret)) + return (200, ret) class ThreepidRestServlet(RestServlet): @@ -536,7 +536,7 @@ class ThreepidRestServlet(RestServlet): threepids = yield self.datastore.user_get_threepids(requester.user.to_string()) - defer.returnValue((200, {"threepids": threepids})) + return (200, {"threepids": threepids}) @defer.inlineCallbacks def on_POST(self, request): @@ -568,7 +568,7 @@ class ThreepidRestServlet(RestServlet): logger.debug("Binding threepid %s to %s", threepid, user_id) yield self.identity_handler.bind_threepid(threePidCreds, user_id) - defer.returnValue((200, {})) + return (200, {}) class ThreepidDeleteRestServlet(RestServlet): @@ -603,7 +603,7 @@ class ThreepidDeleteRestServlet(RestServlet): else: 
id_server_unbind_result = "no-support" - defer.returnValue((200, {"id_server_unbind_result": id_server_unbind_result})) + return (200, {"id_server_unbind_result": id_server_unbind_result}) class WhoamiRestServlet(RestServlet): @@ -617,7 +617,7 @@ class WhoamiRestServlet(RestServlet): def on_GET(self, request): requester = yield self.auth.get_user_by_req(request) - defer.returnValue((200, {"user_id": requester.user.to_string()})) + return (200, {"user_id": requester.user.to_string()}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py index f155c26259..98f2f6f4b5 100644 --- a/synapse/rest/client/v2_alpha/account_data.py +++ b/synapse/rest/client/v2_alpha/account_data.py @@ -55,7 +55,7 @@ class AccountDataServlet(RestServlet): self.notifier.on_new_event("account_data_key", max_id, users=[user_id]) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_GET(self, request, user_id, account_data_type): @@ -70,7 +70,7 @@ class AccountDataServlet(RestServlet): if event is None: raise NotFoundError("Account data not found") - defer.returnValue((200, event)) + return (200, event) class RoomAccountDataServlet(RestServlet): @@ -112,7 +112,7 @@ class RoomAccountDataServlet(RestServlet): self.notifier.on_new_event("account_data_key", max_id, users=[user_id]) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_GET(self, request, user_id, room_id, account_data_type): @@ -127,7 +127,7 @@ class RoomAccountDataServlet(RestServlet): if event is None: raise NotFoundError("Room account data not found") - defer.returnValue((200, event)) + return (200, event) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/account_validity.py b/synapse/rest/client/v2_alpha/account_validity.py index d29c10b83d..133c61900a 100644 --- a/synapse/rest/client/v2_alpha/account_validity.py +++ b/synapse/rest/client/v2_alpha/account_validity.py @@ -58,7 +58,7 @@ class AccountValidityRenewServlet(RestServlet): ) request.write(AccountValidityRenewServlet.SUCCESS_HTML) finish_request(request) - defer.returnValue(None) + return None class AccountValiditySendMailServlet(RestServlet): @@ -87,7 +87,7 @@ class AccountValiditySendMailServlet(RestServlet): user_id = requester.user.to_string() yield self.account_activity_handler.send_renewal_email_to_user(user_id) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index bebc2951e7..f21aff39e5 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -207,7 +207,7 @@ class AuthRestServlet(RestServlet): request.write(html_bytes) finish_request(request) - defer.returnValue(None) + return None elif stagetype == LoginType.TERMS: if ("session" not in request.args or len(request.args["session"])) == 0: raise SynapseError(400, "No session supplied") @@ -239,7 +239,7 @@ class AuthRestServlet(RestServlet): request.write(html_bytes) finish_request(request) - defer.returnValue(None) + return None else: raise SynapseError(404, "Unknown auth stage type") diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/v2_alpha/capabilities.py index fc7e2f4dd5..a4fa45fe11 100644 --- a/synapse/rest/client/v2_alpha/capabilities.py +++ b/synapse/rest/client/v2_alpha/capabilities.py @@ -58,7 +58,7 @@ class CapabilitiesRestServlet(RestServlet): 
"m.change_password": {"enabled": change_password}, } } - defer.returnValue((200, response)) + return (200, response) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index d279229d74..9adf76cc0c 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -48,7 +48,7 @@ class DevicesRestServlet(RestServlet): devices = yield self.device_handler.get_devices_by_user( requester.user.to_string() ) - defer.returnValue((200, {"devices": devices})) + return (200, {"devices": devices}) class DeleteDevicesRestServlet(RestServlet): @@ -91,7 +91,7 @@ class DeleteDevicesRestServlet(RestServlet): yield self.device_handler.delete_devices( requester.user.to_string(), body["devices"] ) - defer.returnValue((200, {})) + return (200, {}) class DeviceRestServlet(RestServlet): @@ -114,7 +114,7 @@ class DeviceRestServlet(RestServlet): device = yield self.device_handler.get_device( requester.user.to_string(), device_id ) - defer.returnValue((200, device)) + return (200, device) @interactive_auth_handler @defer.inlineCallbacks @@ -137,7 +137,7 @@ class DeviceRestServlet(RestServlet): ) yield self.device_handler.delete_device(requester.user.to_string(), device_id) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_PUT(self, request, device_id): @@ -147,7 +147,7 @@ class DeviceRestServlet(RestServlet): yield self.device_handler.update_device( requester.user.to_string(), device_id, body ) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py index 3f0adf4a21..22be0ee3c5 100644 --- a/synapse/rest/client/v2_alpha/filter.py +++ b/synapse/rest/client/v2_alpha/filter.py @@ -56,7 +56,7 @@ class GetFilterRestServlet(RestServlet): user_localpart=target_user.localpart, filter_id=filter_id ) - defer.returnValue((200, filter.get_filter_json())) + return (200, filter.get_filter_json()) except (KeyError, StoreError): raise SynapseError(400, "No such filter", errcode=Codes.NOT_FOUND) @@ -89,7 +89,7 @@ class CreateFilterRestServlet(RestServlet): user_localpart=target_user.localpart, user_filter=content ) - defer.returnValue((200, {"filter_id": str(filter_id)})) + return (200, {"filter_id": str(filter_id)}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index a312dd2593..e629c4256d 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -47,7 +47,7 @@ class GroupServlet(RestServlet): group_id, requester_user_id ) - defer.returnValue((200, group_description)) + return (200, group_description) @defer.inlineCallbacks def on_POST(self, request, group_id): @@ -59,7 +59,7 @@ class GroupServlet(RestServlet): group_id, requester_user_id, content ) - defer.returnValue((200, {})) + return (200, {}) class GroupSummaryServlet(RestServlet): @@ -83,7 +83,7 @@ class GroupSummaryServlet(RestServlet): group_id, requester_user_id ) - defer.returnValue((200, get_group_summary)) + return (200, get_group_summary) class GroupSummaryRoomsCatServlet(RestServlet): @@ -120,7 +120,7 @@ class GroupSummaryRoomsCatServlet(RestServlet): content=content, ) - defer.returnValue((200, resp)) + return (200, resp) @defer.inlineCallbacks def on_DELETE(self, request, group_id, category_id, room_id): @@ -131,7 +131,7 @@ class 
GroupSummaryRoomsCatServlet(RestServlet): group_id, requester_user_id, room_id=room_id, category_id=category_id ) - defer.returnValue((200, resp)) + return (200, resp) class GroupCategoryServlet(RestServlet): @@ -157,7 +157,7 @@ class GroupCategoryServlet(RestServlet): group_id, requester_user_id, category_id=category_id ) - defer.returnValue((200, category)) + return (200, category) @defer.inlineCallbacks def on_PUT(self, request, group_id, category_id): @@ -169,7 +169,7 @@ class GroupCategoryServlet(RestServlet): group_id, requester_user_id, category_id=category_id, content=content ) - defer.returnValue((200, resp)) + return (200, resp) @defer.inlineCallbacks def on_DELETE(self, request, group_id, category_id): @@ -180,7 +180,7 @@ class GroupCategoryServlet(RestServlet): group_id, requester_user_id, category_id=category_id ) - defer.returnValue((200, resp)) + return (200, resp) class GroupCategoriesServlet(RestServlet): @@ -204,7 +204,7 @@ class GroupCategoriesServlet(RestServlet): group_id, requester_user_id ) - defer.returnValue((200, category)) + return (200, category) class GroupRoleServlet(RestServlet): @@ -228,7 +228,7 @@ class GroupRoleServlet(RestServlet): group_id, requester_user_id, role_id=role_id ) - defer.returnValue((200, category)) + return (200, category) @defer.inlineCallbacks def on_PUT(self, request, group_id, role_id): @@ -240,7 +240,7 @@ class GroupRoleServlet(RestServlet): group_id, requester_user_id, role_id=role_id, content=content ) - defer.returnValue((200, resp)) + return (200, resp) @defer.inlineCallbacks def on_DELETE(self, request, group_id, role_id): @@ -251,7 +251,7 @@ class GroupRoleServlet(RestServlet): group_id, requester_user_id, role_id=role_id ) - defer.returnValue((200, resp)) + return (200, resp) class GroupRolesServlet(RestServlet): @@ -275,7 +275,7 @@ class GroupRolesServlet(RestServlet): group_id, requester_user_id ) - defer.returnValue((200, category)) + return (200, category) class GroupSummaryUsersRoleServlet(RestServlet): @@ -312,7 +312,7 @@ class GroupSummaryUsersRoleServlet(RestServlet): content=content, ) - defer.returnValue((200, resp)) + return (200, resp) @defer.inlineCallbacks def on_DELETE(self, request, group_id, role_id, user_id): @@ -323,7 +323,7 @@ class GroupSummaryUsersRoleServlet(RestServlet): group_id, requester_user_id, user_id=user_id, role_id=role_id ) - defer.returnValue((200, resp)) + return (200, resp) class GroupRoomServlet(RestServlet): @@ -347,7 +347,7 @@ class GroupRoomServlet(RestServlet): group_id, requester_user_id ) - defer.returnValue((200, result)) + return (200, result) class GroupUsersServlet(RestServlet): @@ -371,7 +371,7 @@ class GroupUsersServlet(RestServlet): group_id, requester_user_id ) - defer.returnValue((200, result)) + return (200, result) class GroupInvitedUsersServlet(RestServlet): @@ -395,7 +395,7 @@ class GroupInvitedUsersServlet(RestServlet): group_id, requester_user_id ) - defer.returnValue((200, result)) + return (200, result) class GroupSettingJoinPolicyServlet(RestServlet): @@ -420,7 +420,7 @@ class GroupSettingJoinPolicyServlet(RestServlet): group_id, requester_user_id, content ) - defer.returnValue((200, result)) + return (200, result) class GroupCreateServlet(RestServlet): @@ -450,7 +450,7 @@ class GroupCreateServlet(RestServlet): group_id, requester_user_id, content ) - defer.returnValue((200, result)) + return (200, result) class GroupAdminRoomsServlet(RestServlet): @@ -477,7 +477,7 @@ class GroupAdminRoomsServlet(RestServlet): group_id, requester_user_id, room_id, content ) - 
defer.returnValue((200, result)) + return (200, result) @defer.inlineCallbacks def on_DELETE(self, request, group_id, room_id): @@ -488,7 +488,7 @@ class GroupAdminRoomsServlet(RestServlet): group_id, requester_user_id, room_id ) - defer.returnValue((200, result)) + return (200, result) class GroupAdminRoomsConfigServlet(RestServlet): @@ -516,7 +516,7 @@ class GroupAdminRoomsConfigServlet(RestServlet): group_id, requester_user_id, room_id, config_key, content ) - defer.returnValue((200, result)) + return (200, result) class GroupAdminUsersInviteServlet(RestServlet): @@ -546,7 +546,7 @@ class GroupAdminUsersInviteServlet(RestServlet): group_id, user_id, requester_user_id, config ) - defer.returnValue((200, result)) + return (200, result) class GroupAdminUsersKickServlet(RestServlet): @@ -573,7 +573,7 @@ class GroupAdminUsersKickServlet(RestServlet): group_id, user_id, requester_user_id, content ) - defer.returnValue((200, result)) + return (200, result) class GroupSelfLeaveServlet(RestServlet): @@ -598,7 +598,7 @@ class GroupSelfLeaveServlet(RestServlet): group_id, requester_user_id, requester_user_id, content ) - defer.returnValue((200, result)) + return (200, result) class GroupSelfJoinServlet(RestServlet): @@ -623,7 +623,7 @@ class GroupSelfJoinServlet(RestServlet): group_id, requester_user_id, content ) - defer.returnValue((200, result)) + return (200, result) class GroupSelfAcceptInviteServlet(RestServlet): @@ -648,7 +648,7 @@ class GroupSelfAcceptInviteServlet(RestServlet): group_id, requester_user_id, content ) - defer.returnValue((200, result)) + return (200, result) class GroupSelfUpdatePublicityServlet(RestServlet): @@ -672,7 +672,7 @@ class GroupSelfUpdatePublicityServlet(RestServlet): publicise = content["publicise"] yield self.store.update_group_publicity(group_id, requester_user_id, publicise) - defer.returnValue((200, {})) + return (200, {}) class PublicisedGroupsForUserServlet(RestServlet): @@ -694,7 +694,7 @@ class PublicisedGroupsForUserServlet(RestServlet): result = yield self.groups_handler.get_publicised_groups_for_user(user_id) - defer.returnValue((200, result)) + return (200, result) class PublicisedGroupsForUsersServlet(RestServlet): @@ -719,7 +719,7 @@ class PublicisedGroupsForUsersServlet(RestServlet): result = yield self.groups_handler.bulk_get_publicised_groups(user_ids) - defer.returnValue((200, result)) + return (200, result) class GroupsForUserServlet(RestServlet): @@ -741,7 +741,7 @@ class GroupsForUserServlet(RestServlet): result = yield self.groups_handler.get_joined_groups(requester_user_id) - defer.returnValue((200, result)) + return (200, result) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 45c9928b65..6008adec7c 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -95,7 +95,7 @@ class KeyUploadServlet(RestServlet): result = yield self.e2e_keys_handler.upload_keys_for_user( user_id, device_id, body ) - defer.returnValue((200, result)) + return (200, result) class KeyQueryServlet(RestServlet): @@ -149,7 +149,7 @@ class KeyQueryServlet(RestServlet): timeout = parse_integer(request, "timeout", 10 * 1000) body = parse_json_object_from_request(request) result = yield self.e2e_keys_handler.query_devices(body, timeout) - defer.returnValue((200, result)) + return (200, result) class KeyChangesServlet(RestServlet): @@ -189,7 +189,7 @@ class KeyChangesServlet(RestServlet): results = yield 
self.device_handler.get_user_ids_changed(user_id, from_token) - defer.returnValue((200, results)) + return (200, results) class OneTimeKeyServlet(RestServlet): @@ -224,7 +224,7 @@ class OneTimeKeyServlet(RestServlet): timeout = parse_integer(request, "timeout", 10 * 1000) body = parse_json_object_from_request(request) result = yield self.e2e_keys_handler.claim_one_time_keys(body, timeout) - defer.returnValue((200, result)) + return (200, result) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/v2_alpha/notifications.py index 728a52328f..d034863a3c 100644 --- a/synapse/rest/client/v2_alpha/notifications.py +++ b/synapse/rest/client/v2_alpha/notifications.py @@ -88,9 +88,7 @@ class NotificationsServlet(RestServlet): returned_push_actions.append(returned_pa) next_token = str(pa["stream_ordering"]) - defer.returnValue( - (200, {"notifications": returned_push_actions, "next_token": next_token}) - ) + return (200, {"notifications": returned_push_actions, "next_token": next_token}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/openid.py b/synapse/rest/client/v2_alpha/openid.py index b1b5385b09..b4925c0f59 100644 --- a/synapse/rest/client/v2_alpha/openid.py +++ b/synapse/rest/client/v2_alpha/openid.py @@ -83,16 +83,14 @@ class IdTokenServlet(RestServlet): yield self.store.insert_open_id_token(token, ts_valid_until_ms, user_id) - defer.returnValue( - ( - 200, - { - "access_token": token, - "token_type": "Bearer", - "matrix_server_name": self.server_name, - "expires_in": self.EXPIRES_MS / 1000, - }, - ) + return ( + 200, + { + "access_token": token, + "token_type": "Bearer", + "matrix_server_name": self.server_name, + "expires_in": self.EXPIRES_MS / 1000, + }, ) diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/v2_alpha/read_marker.py index e75664279b..d93d6a9f24 100644 --- a/synapse/rest/client/v2_alpha/read_marker.py +++ b/synapse/rest/client/v2_alpha/read_marker.py @@ -59,7 +59,7 @@ class ReadMarkerRestServlet(RestServlet): event_id=read_marker_event_id, ) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py index 488905626a..98a97b7059 100644 --- a/synapse/rest/client/v2_alpha/receipts.py +++ b/synapse/rest/client/v2_alpha/receipts.py @@ -52,7 +52,7 @@ class ReceiptRestServlet(RestServlet): room_id, receipt_type, user_id=requester.user.to_string(), event_id=event_id ) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index f327999e59..05ea1459e3 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -95,7 +95,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) ret = yield self.identity_handler.requestEmailToken(**body) - defer.returnValue((200, ret)) + return (200, ret) class MsisdnRegisterRequestTokenRestServlet(RestServlet): @@ -138,7 +138,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet): ) ret = yield self.identity_handler.requestMsisdnToken(**body) - defer.returnValue((200, ret)) + return (200, ret) class UsernameAvailabilityRestServlet(RestServlet): @@ -178,7 +178,7 @@ class UsernameAvailabilityRestServlet(RestServlet): yield 
self.registration_handler.check_username(username) - defer.returnValue((200, {"available": True})) + return (200, {"available": True}) class RegisterRestServlet(RestServlet): @@ -230,7 +230,7 @@ class RegisterRestServlet(RestServlet): if kind == b"guest": ret = yield self._do_guest_registration(body, address=client_addr) - defer.returnValue(ret) + return ret return elif kind != b"user": raise UnrecognizedRequestError( @@ -282,7 +282,7 @@ class RegisterRestServlet(RestServlet): result = yield self._do_appservice_registration( desired_username, access_token, body ) - defer.returnValue((200, result)) # we throw for non 200 responses + return (200, result) # we throw for non 200 responses return # for either shared secret or regular registration, downcase the @@ -301,7 +301,7 @@ class RegisterRestServlet(RestServlet): result = yield self._do_shared_secret_registration( desired_username, desired_password, body ) - defer.returnValue((200, result)) # we throw for non 200 responses + return (200, result) # we throw for non 200 responses return # == Normal User Registration == (everyone else) @@ -500,7 +500,7 @@ class RegisterRestServlet(RestServlet): bind_msisdn=params.get("bind_msisdn"), ) - defer.returnValue((200, return_dict)) + return (200, return_dict) def on_OPTIONS(self, _): return 200, {} @@ -510,7 +510,7 @@ class RegisterRestServlet(RestServlet): user_id = yield self.registration_handler.appservice_register( username, as_token ) - defer.returnValue((yield self._create_registration_details(user_id, body))) + return (yield self._create_registration_details(user_id, body)) @defer.inlineCallbacks def _do_shared_secret_registration(self, username, password, body): @@ -546,7 +546,7 @@ class RegisterRestServlet(RestServlet): ) result = yield self._create_registration_details(user_id, body) - defer.returnValue(result) + return result @defer.inlineCallbacks def _create_registration_details(self, user_id, params): @@ -570,7 +570,7 @@ class RegisterRestServlet(RestServlet): ) result.update({"access_token": access_token, "device_id": device_id}) - defer.returnValue(result) + return result @defer.inlineCallbacks def _do_guest_registration(self, params, address=None): @@ -588,16 +588,14 @@ class RegisterRestServlet(RestServlet): user_id, device_id, initial_display_name, is_guest=True ) - defer.returnValue( - ( - 200, - { - "user_id": user_id, - "device_id": device_id, - "access_token": access_token, - "home_server": self.hs.hostname, - }, - ) + return ( + 200, + { + "user_id": user_id, + "device_id": device_id, + "access_token": access_token, + "home_server": self.hs.hostname, + }, ) diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index 6e52f6d284..6fde3decdb 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -116,7 +116,7 @@ class RelationSendServlet(RestServlet): requester, event_dict=event_dict, txn_id=txn_id ) - defer.returnValue((200, {"event_id": event.event_id})) + return (200, {"event_id": event.event_id}) class RelationPaginationServlet(RestServlet): @@ -196,7 +196,7 @@ class RelationPaginationServlet(RestServlet): return_value["chunk"] = events return_value["original_event"] = original_event - defer.returnValue((200, return_value)) + return (200, return_value) class RelationAggregationPaginationServlet(RestServlet): @@ -268,7 +268,7 @@ class RelationAggregationPaginationServlet(RestServlet): to_token=to_token, ) - defer.returnValue((200, pagination_chunk.to_dict())) + return (200, 
pagination_chunk.to_dict()) class RelationAggregationGroupPaginationServlet(RestServlet): @@ -354,7 +354,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet): return_value = result.to_dict() return_value["chunk"] = events - defer.returnValue((200, return_value)) + return (200, return_value) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/v2_alpha/report_event.py index e7578af804..3fdd4584a3 100644 --- a/synapse/rest/client/v2_alpha/report_event.py +++ b/synapse/rest/client/v2_alpha/report_event.py @@ -72,7 +72,7 @@ class ReportEventRestServlet(RestServlet): received_ts=self.clock.time_msec(), ) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 8d1b810565..10dec96208 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -135,7 +135,7 @@ class RoomKeysServlet(RestServlet): body = {"rooms": {room_id: body}} yield self.e2e_room_keys_handler.upload_room_keys(user_id, version, body) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_GET(self, request, room_id, session_id): @@ -218,7 +218,7 @@ class RoomKeysServlet(RestServlet): else: room_keys = room_keys["rooms"][room_id] - defer.returnValue((200, room_keys)) + return (200, room_keys) @defer.inlineCallbacks def on_DELETE(self, request, room_id, session_id): @@ -242,7 +242,7 @@ class RoomKeysServlet(RestServlet): yield self.e2e_room_keys_handler.delete_room_keys( user_id, version, room_id, session_id ) - defer.returnValue((200, {})) + return (200, {}) class RoomKeysNewVersionServlet(RestServlet): @@ -293,7 +293,7 @@ class RoomKeysNewVersionServlet(RestServlet): info = parse_json_object_from_request(request) new_version = yield self.e2e_room_keys_handler.create_version(user_id, info) - defer.returnValue((200, {"version": new_version})) + return (200, {"version": new_version}) # we deliberately don't have a PUT /version, as these things really should # be immutable to avoid people footgunning @@ -338,7 +338,7 @@ class RoomKeysVersionServlet(RestServlet): except SynapseError as e: if e.code == 404: raise SynapseError(404, "No backup found", Codes.NOT_FOUND) - defer.returnValue((200, info)) + return (200, info) @defer.inlineCallbacks def on_DELETE(self, request, version): @@ -358,7 +358,7 @@ class RoomKeysVersionServlet(RestServlet): user_id = requester.user.to_string() yield self.e2e_room_keys_handler.delete_version(user_id, version) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_PUT(self, request, version): @@ -392,7 +392,7 @@ class RoomKeysVersionServlet(RestServlet): ) yield self.e2e_room_keys_handler.update_version(user_id, version, info) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py index d7f7faa029..14ba61a63e 100644 --- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py +++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py @@ -80,7 +80,7 @@ class RoomUpgradeRestServlet(RestServlet): ret = {"replacement_room": new_room_id} - defer.returnValue((200, ret)) + return (200, ret) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py 
b/synapse/rest/client/v2_alpha/sendtodevice.py index 78075b8fc0..2613648d82 100644 --- a/synapse/rest/client/v2_alpha/sendtodevice.py +++ b/synapse/rest/client/v2_alpha/sendtodevice.py @@ -60,7 +60,7 @@ class SendToDeviceRestServlet(servlet.RestServlet): ) response = (200, {}) - defer.returnValue(response) + return response def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 02d56dee6c..7b32dd2212 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -174,7 +174,7 @@ class SyncRestServlet(RestServlet): time_now, sync_result, requester.access_token_id, filter ) - defer.returnValue((200, response_content)) + return (200, response_content) @defer.inlineCallbacks def encode_response(self, time_now, sync_result, access_token_id, filter): @@ -205,27 +205,23 @@ class SyncRestServlet(RestServlet): event_formatter, ) - defer.returnValue( - { - "account_data": {"events": sync_result.account_data}, - "to_device": {"events": sync_result.to_device}, - "device_lists": { - "changed": list(sync_result.device_lists.changed), - "left": list(sync_result.device_lists.left), - }, - "presence": SyncRestServlet.encode_presence( - sync_result.presence, time_now - ), - "rooms": {"join": joined, "invite": invited, "leave": archived}, - "groups": { - "join": sync_result.groups.join, - "invite": sync_result.groups.invite, - "leave": sync_result.groups.leave, - }, - "device_one_time_keys_count": sync_result.device_one_time_keys_count, - "next_batch": sync_result.next_batch.to_string(), - } - ) + return { + "account_data": {"events": sync_result.account_data}, + "to_device": {"events": sync_result.to_device}, + "device_lists": { + "changed": list(sync_result.device_lists.changed), + "left": list(sync_result.device_lists.left), + }, + "presence": SyncRestServlet.encode_presence(sync_result.presence, time_now), + "rooms": {"join": joined, "invite": invited, "leave": archived}, + "groups": { + "join": sync_result.groups.join, + "invite": sync_result.groups.invite, + "leave": sync_result.groups.leave, + }, + "device_one_time_keys_count": sync_result.device_one_time_keys_count, + "next_batch": sync_result.next_batch.to_string(), + } @staticmethod def encode_presence(events, time_now): @@ -273,7 +269,7 @@ class SyncRestServlet(RestServlet): event_formatter=event_formatter, ) - defer.returnValue(joined) + return joined @defer.inlineCallbacks def encode_invited(self, rooms, time_now, token_id, event_formatter): @@ -309,7 +305,7 @@ class SyncRestServlet(RestServlet): invited_state.append(invite) invited[room.room_id] = {"invite_state": {"events": invited_state}} - defer.returnValue(invited) + return invited @defer.inlineCallbacks def encode_archived(self, rooms, time_now, token_id, event_fields, event_formatter): @@ -342,7 +338,7 @@ class SyncRestServlet(RestServlet): event_formatter=event_formatter, ) - defer.returnValue(joined) + return joined @defer.inlineCallbacks def encode_room( @@ -414,7 +410,7 @@ class SyncRestServlet(RestServlet): result["unread_notifications"] = room.unread_notifications result["summary"] = room.summary - defer.returnValue(result) + return result def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py index 07b6ede603..d173544355 100644 --- a/synapse/rest/client/v2_alpha/tags.py +++ b/synapse/rest/client/v2_alpha/tags.py @@ -45,7 +45,7 @@ class TagListServlet(RestServlet): tags = yield 
self.store.get_tags_for_room(user_id, room_id) - defer.returnValue((200, {"tags": tags})) + return (200, {"tags": tags}) class TagServlet(RestServlet): @@ -76,7 +76,7 @@ class TagServlet(RestServlet): self.notifier.on_new_event("account_data_key", max_id, users=[user_id]) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_DELETE(self, request, user_id, room_id, tag): @@ -88,7 +88,7 @@ class TagServlet(RestServlet): self.notifier.on_new_event("account_data_key", max_id, users=[user_id]) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py index 1e66662a05..158e686b01 100644 --- a/synapse/rest/client/v2_alpha/thirdparty.py +++ b/synapse/rest/client/v2_alpha/thirdparty.py @@ -40,7 +40,7 @@ class ThirdPartyProtocolsServlet(RestServlet): yield self.auth.get_user_by_req(request, allow_guest=True) protocols = yield self.appservice_handler.get_3pe_protocols() - defer.returnValue((200, protocols)) + return (200, protocols) class ThirdPartyProtocolServlet(RestServlet): @@ -60,9 +60,9 @@ class ThirdPartyProtocolServlet(RestServlet): only_protocol=protocol ) if protocol in protocols: - defer.returnValue((200, protocols[protocol])) + return (200, protocols[protocol]) else: - defer.returnValue((404, {"error": "Unknown protocol"})) + return (404, {"error": "Unknown protocol"}) class ThirdPartyUserServlet(RestServlet): @@ -85,7 +85,7 @@ class ThirdPartyUserServlet(RestServlet): ThirdPartyEntityKind.USER, protocol, fields ) - defer.returnValue((200, results)) + return (200, results) class ThirdPartyLocationServlet(RestServlet): @@ -108,7 +108,7 @@ class ThirdPartyLocationServlet(RestServlet): ThirdPartyEntityKind.LOCATION, protocol, fields ) - defer.returnValue((200, results)) + return (200, results) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py index e19fb6d583..7ab2b80e46 100644 --- a/synapse/rest/client/v2_alpha/user_directory.py +++ b/synapse/rest/client/v2_alpha/user_directory.py @@ -60,7 +60,7 @@ class UserDirectorySearchRestServlet(RestServlet): user_id = requester.user.to_string() if not self.hs.config.user_directory_search_enabled: - defer.returnValue((200, {"limited": False, "results": []})) + return (200, {"limited": False, "results": []}) body = parse_json_object_from_request(request) @@ -76,7 +76,7 @@ class UserDirectorySearchRestServlet(RestServlet): user_id, search_term, limit ) - defer.returnValue((200, results)) + return (200, results) def register_servlets(hs, http_server): diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 65afffbb42..92beefa176 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -171,7 +171,7 @@ class MediaRepository(object): yield self._generate_thumbnails(None, media_id, media_id, media_type) - defer.returnValue("mxc://%s/%s" % (self.server_name, media_id)) + return "mxc://%s/%s" % (self.server_name, media_id) @defer.inlineCallbacks def get_local_media(self, request, media_id, name): @@ -282,7 +282,7 @@ class MediaRepository(object): with responder: pass - defer.returnValue(media_info) + return media_info @defer.inlineCallbacks def _get_remote_media_impl(self, server_name, media_id): @@ -317,14 +317,14 @@ class MediaRepository(object): responder = yield self.media_storage.fetch_media(file_info) if 
responder: - defer.returnValue((responder, media_info)) + return (responder, media_info) # Failed to find the file anywhere, lets download it. media_info = yield self._download_remote_file(server_name, media_id, file_id) responder = yield self.media_storage.fetch_media(file_info) - defer.returnValue((responder, media_info)) + return (responder, media_info) @defer.inlineCallbacks def _download_remote_file(self, server_name, media_id, file_id): @@ -421,7 +421,7 @@ class MediaRepository(object): yield self._generate_thumbnails(server_name, media_id, file_id, media_type) - defer.returnValue(media_info) + return media_info def _get_thumbnail_requirements(self, media_type): return self.thumbnail_requirements.get(media_type, ()) @@ -500,7 +500,7 @@ class MediaRepository(object): media_id, t_width, t_height, t_type, t_method, t_len ) - defer.returnValue(output_path) + return output_path @defer.inlineCallbacks def generate_remote_exact_thumbnail( @@ -554,7 +554,7 @@ class MediaRepository(object): t_len, ) - defer.returnValue(output_path) + return output_path @defer.inlineCallbacks def _generate_thumbnails( @@ -667,7 +667,7 @@ class MediaRepository(object): media_id, t_width, t_height, t_type, t_method, t_len ) - defer.returnValue({"width": m_width, "height": m_height}) + return {"width": m_width, "height": m_height} @defer.inlineCallbacks def delete_old_remote_media(self, before_ts): @@ -704,7 +704,7 @@ class MediaRepository(object): yield self.store.delete_remote_media(origin, media_id) deleted += 1 - defer.returnValue({"deleted": deleted}) + return {"deleted": deleted} class MediaRepositoryResource(Resource): diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index 25e5ac2848..3b87717a5a 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -69,7 +69,7 @@ class MediaStorage(object): ) yield finish_cb() - defer.returnValue(fname) + return fname @contextlib.contextmanager def store_into_file(self, file_info): @@ -143,14 +143,14 @@ class MediaStorage(object): path = self._file_info_to_path(file_info) local_path = os.path.join(self.local_media_directory, path) if os.path.exists(local_path): - defer.returnValue(FileResponder(open(local_path, "rb"))) + return FileResponder(open(local_path, "rb")) for provider in self.storage_providers: res = yield provider.fetch(path, file_info) if res: - defer.returnValue(res) + return res - defer.returnValue(None) + return None @defer.inlineCallbacks def ensure_media_is_in_local_cache(self, file_info): @@ -166,7 +166,7 @@ class MediaStorage(object): path = self._file_info_to_path(file_info) local_path = os.path.join(self.local_media_directory, path) if os.path.exists(local_path): - defer.returnValue(local_path) + return local_path dirname = os.path.dirname(local_path) if not os.path.exists(dirname): @@ -181,7 +181,7 @@ class MediaStorage(object): ) yield res.write_to_consumer(consumer) yield consumer.wait() - defer.returnValue(local_path) + return local_path raise Exception("file could not be found") diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 5871737bfd..bd40891a7f 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -182,7 +182,7 @@ class PreviewUrlResource(DirectServeResource): og = cache_result["og"] if isinstance(og, six.text_type): og = og.encode("utf8") - defer.returnValue(og) + return og return media_info = yield self._download_url(url, user) @@ 
-284,7 +284,7 @@ class PreviewUrlResource(DirectServeResource): media_info["created_ts"], ) - defer.returnValue(jsonog) + return jsonog @defer.inlineCallbacks def _download_url(self, url, user): @@ -354,22 +354,20 @@ class PreviewUrlResource(DirectServeResource): # therefore not expire it. raise - defer.returnValue( - { - "media_type": media_type, - "media_length": length, - "download_name": download_name, - "created_ts": time_now_ms, - "filesystem_id": file_id, - "filename": fname, - "uri": uri, - "response_code": code, - # FIXME: we should calculate a proper expiration based on the - # Cache-Control and Expire headers. But for now, assume 1 hour. - "expires": 60 * 60 * 1000, - "etag": headers["ETag"][0] if "ETag" in headers else None, - } - ) + return { + "media_type": media_type, + "media_length": length, + "download_name": download_name, + "created_ts": time_now_ms, + "filesystem_id": file_id, + "filename": fname, + "uri": uri, + "response_code": code, + # FIXME: we should calculate a proper expiration based on the + # Cache-Control and Expire headers. But for now, assume 1 hour. + "expires": 60 * 60 * 1000, + "etag": headers["ETag"][0] if "ETag" in headers else None, + } def _start_expire_url_cache_data(self): return run_as_background_process( diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index f183743f31..729c097e6d 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -193,4 +193,4 @@ class ResourceLimitsServerNotices(object): if event_id in referenced_events: referenced_events.remove(event.event_id) - defer.returnValue((currently_blocked, referenced_events)) + return (currently_blocked, referenced_events) diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index 71e7e75320..2dac90578c 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -86,7 +86,7 @@ class ServerNoticesManager(object): res = yield self._event_creation_handler.create_and_send_nonmember_event( requester, event_dict, ratelimit=False ) - defer.returnValue(res) + return res @cachedInlineCallbacks() def get_notice_room_for_user(self, user_id): @@ -120,7 +120,7 @@ class ServerNoticesManager(object): # we found a room which our user shares with the system notice # user logger.info("Using room %s", room.room_id) - defer.returnValue(room.room_id) + return room.room_id # apparently no existing notice room: create a new one logger.info("Creating server notices room for %s", user_id) @@ -158,4 +158,4 @@ class ServerNoticesManager(object): self._notifier.on_new_event("account_data_key", max_id, users=[user_id]) logger.info("Created server notices room %s for %s", room_id, user_id) - defer.returnValue(room_id) + return room_id diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 9f708fa205..a0d34f16ea 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -135,7 +135,7 @@ class StateHandler(object): event = None if event_id: event = yield self.store.get_event(event_id, allow_none=True) - defer.returnValue(event) + return event return state_map = yield self.store.get_events( @@ -145,7 +145,7 @@ class StateHandler(object): key: state_map[e_id] for key, e_id in iteritems(state) if e_id in state_map } - defer.returnValue(state) + return state @defer.inlineCallbacks def get_current_state_ids(self, room_id, 
latest_event_ids=None): @@ -169,7 +169,7 @@ class StateHandler(object): ret = yield self.resolve_state_groups_for_events(room_id, latest_event_ids) state = ret.state - defer.returnValue(state) + return state @defer.inlineCallbacks def get_current_users_in_room(self, room_id, latest_event_ids=None): @@ -189,7 +189,7 @@ class StateHandler(object): logger.debug("calling resolve_state_groups from get_current_users_in_room") entry = yield self.resolve_state_groups_for_events(room_id, latest_event_ids) joined_users = yield self.store.get_joined_users_from_state(room_id, entry) - defer.returnValue(joined_users) + return joined_users @defer.inlineCallbacks def get_current_hosts_in_room(self, room_id, latest_event_ids=None): @@ -198,7 +198,7 @@ class StateHandler(object): logger.debug("calling resolve_state_groups from get_current_hosts_in_room") entry = yield self.resolve_state_groups_for_events(room_id, latest_event_ids) joined_hosts = yield self.store.get_joined_hosts(room_id, entry) - defer.returnValue(joined_hosts) + return joined_hosts @defer.inlineCallbacks def compute_event_context(self, event, old_state=None): @@ -241,7 +241,7 @@ class StateHandler(object): prev_state_ids=prev_state_ids, ) - defer.returnValue(context) + return context if old_state: # We already have the state, so we don't need to calculate it. @@ -275,7 +275,7 @@ class StateHandler(object): prev_state_ids=prev_state_ids, ) - defer.returnValue(context) + return context logger.debug("calling resolve_state_groups from compute_event_context") @@ -343,7 +343,7 @@ class StateHandler(object): delta_ids=delta_ids, ) - defer.returnValue(context) + return context @defer.inlineCallbacks def resolve_state_groups_for_events(self, room_id, event_ids): @@ -368,19 +368,17 @@ class StateHandler(object): state_groups_ids = yield self.store.get_state_groups_ids(room_id, event_ids) if len(state_groups_ids) == 0: - defer.returnValue(_StateCacheEntry(state={}, state_group=None)) + return _StateCacheEntry(state={}, state_group=None) elif len(state_groups_ids) == 1: name, state_list = list(state_groups_ids.items()).pop() prev_group, delta_ids = yield self.store.get_state_group_delta(name) - defer.returnValue( - _StateCacheEntry( - state=state_list, - state_group=name, - prev_group=prev_group, - delta_ids=delta_ids, - ) + return _StateCacheEntry( + state=state_list, + state_group=name, + prev_group=prev_group, + delta_ids=delta_ids, ) room_version = yield self.store.get_room_version(room_id) @@ -392,7 +390,7 @@ class StateHandler(object): None, state_res_store=StateResolutionStore(self.store), ) - defer.returnValue(result) + return result @defer.inlineCallbacks def resolve_events(self, room_version, state_sets, event): @@ -415,7 +413,7 @@ class StateHandler(object): new_state = {key: state_map[ev_id] for key, ev_id in iteritems(new_state)} - defer.returnValue(new_state) + return new_state class StateResolutionHandler(object): @@ -479,7 +477,7 @@ class StateResolutionHandler(object): if self._state_cache is not None: cache = self._state_cache.get(group_names, None) if cache: - defer.returnValue(cache) + return cache logger.info( "Resolving state for %s with %d groups", room_id, len(state_groups_ids) @@ -525,7 +523,7 @@ class StateResolutionHandler(object): if self._state_cache is not None: self._state_cache[group_names] = cache - defer.returnValue(cache) + return cache def _make_state_cache_entry(new_state, state_groups_ids): diff --git a/synapse/state/v1.py b/synapse/state/v1.py index 88acd4817e..a2f92d9ff9 100644 --- a/synapse/state/v1.py +++ 
b/synapse/state/v1.py @@ -55,7 +55,7 @@ def resolve_events_with_store(state_sets, event_map, state_map_factory): a map from (type, state_key) to event_id. """ if len(state_sets) == 1: - defer.returnValue(state_sets[0]) + return state_sets[0] unconflicted_state, conflicted_state = _seperate(state_sets) @@ -97,10 +97,8 @@ def resolve_events_with_store(state_sets, event_map, state_map_factory): state_map_new = yield state_map_factory(new_needed_events) state_map.update(state_map_new) - defer.returnValue( - _resolve_with_state( - unconflicted_state, conflicted_state, auth_events, state_map - ) + return _resolve_with_state( + unconflicted_state, conflicted_state, auth_events, state_map ) diff --git a/synapse/state/v2.py b/synapse/state/v2.py index db969e8997..b327c86f40 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -63,7 +63,7 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto unconflicted_state, conflicted_state = _seperate(state_sets) if not conflicted_state: - defer.returnValue(unconflicted_state) + return unconflicted_state logger.debug("%d conflicted state entries", len(conflicted_state)) logger.debug("Calculating auth chain difference") @@ -137,7 +137,7 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto logger.debug("done") - defer.returnValue(resolved_state) + return resolved_state @defer.inlineCallbacks @@ -168,18 +168,18 @@ def _get_power_level_for_sender(event_id, event_map, state_res_store): aev = yield _get_event(aid, event_map, state_res_store) if (aev.type, aev.state_key) == (EventTypes.Create, ""): if aev.content.get("creator") == event.sender: - defer.returnValue(100) + return 100 break - defer.returnValue(0) + return 0 level = pl.content.get("users", {}).get(event.sender) if level is None: level = pl.content.get("users_default", 0) if level is None: - defer.returnValue(0) + return 0 else: - defer.returnValue(int(level)) + return int(level) @defer.inlineCallbacks @@ -224,7 +224,7 @@ def _get_auth_chain_difference(state_sets, event_map, state_res_store): intersection = set(auth_sets[0]).intersection(*auth_sets[1:]) union = set().union(*auth_sets) - defer.returnValue(union - intersection) + return union - intersection def _seperate(state_sets): @@ -343,7 +343,7 @@ def _reverse_topological_power_sort(event_ids, event_map, state_res_store, auth_ it = lexicographical_topological_sort(graph, key=_get_power_order) sorted_events = list(it) - defer.returnValue(sorted_events) + return sorted_events @defer.inlineCallbacks @@ -396,7 +396,7 @@ def _iterative_auth_checks( except AuthError: pass - defer.returnValue(resolved_state) + return resolved_state @defer.inlineCallbacks @@ -439,7 +439,7 @@ def _mainline_sort(event_ids, resolved_power_event_id, event_map, state_res_stor event_ids.sort(key=lambda ev_id: order_map[ev_id]) - defer.returnValue(event_ids) + return event_ids @defer.inlineCallbacks @@ -462,7 +462,7 @@ def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_stor while event: depth = mainline_map.get(event.event_id) if depth is not None: - defer.returnValue(depth) + return depth auth_events = event.auth_event_ids() event = None @@ -474,7 +474,7 @@ def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_stor break # Didn't find a power level auth event, so we just return 0 - defer.returnValue(0) + return 0 @defer.inlineCallbacks @@ -493,7 +493,7 @@ def _get_event(event_id, event_map, state_res_store): if event_id not in event_map: events = yield 
state_res_store.get_events([event_id], allow_rejected=True) event_map.update(events) - defer.returnValue(event_map[event_id]) + return event_map[event_id] def lexicographical_topological_sort(graph, key): diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 86a333a919..e7f6ea7286 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -498,7 +498,7 @@ class DataStore( ) count = yield self.runInteraction("get_users_paginate", self.get_user_count_txn) retval = {"users": users, "total": count} - defer.returnValue(retval) + return retval def search_users(self, term): """Function to search users list for one or more users with diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index a7c93efa46..489ce82fae 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -513,7 +513,7 @@ class SQLBaseStore(object): after_callback(*after_args, **after_kwargs) raise - defer.returnValue(result) + return result @defer.inlineCallbacks def runWithConnection(self, func, *args, **kwargs): @@ -553,7 +553,7 @@ class SQLBaseStore(object): with PreserveLoggingContext(): result = yield self._db_pool.runWithConnection(inner_func, *args, **kwargs) - defer.returnValue(result) + return result @staticmethod def cursor_to_dict(cursor): @@ -615,8 +615,8 @@ class SQLBaseStore(object): # a cursor after we receive an error from the db. if not or_ignore: raise - defer.returnValue(False) - defer.returnValue(True) + return False + return True @staticmethod def _simple_insert_txn(txn, table, values): @@ -708,7 +708,7 @@ class SQLBaseStore(object): insertion_values, lock=lock, ) - defer.returnValue(result) + return result except self.database_engine.module.IntegrityError as e: attempts += 1 if attempts >= 5: @@ -1121,7 +1121,7 @@ class SQLBaseStore(object): results = [] if not iterable: - defer.returnValue(results) + return results # iterables can not be sliced, so convert it to a list first it_list = list(iterable) @@ -1142,7 +1142,7 @@ class SQLBaseStore(object): results.extend(rows) - defer.returnValue(results) + return results @classmethod def _simple_select_many_txn(cls, txn, table, column, iterable, keyvalues, retcols): diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 8394389073..9fa5b4f3d6 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -111,9 +111,9 @@ class AccountDataWorkerStore(SQLBaseStore): ) if result: - defer.returnValue(json.loads(result)) + return json.loads(result) else: - defer.returnValue(None) + return None @cached(num_args=2) def get_account_data_for_room(self, user_id, room_id): @@ -264,11 +264,9 @@ class AccountDataWorkerStore(SQLBaseStore): on_invalidate=cache_context.invalidate, ) if not ignored_account_data: - defer.returnValue(False) + return False - defer.returnValue( - ignored_user_id in ignored_account_data.get("ignored_users", {}) - ) + return ignored_user_id in ignored_account_data.get("ignored_users", {}) class AccountDataStore(AccountDataWorkerStore): @@ -332,7 +330,7 @@ class AccountDataStore(AccountDataWorkerStore): ) result = self._account_data_id_gen.get_current_token() - defer.returnValue(result) + return result @defer.inlineCallbacks def add_account_data_for_user(self, user_id, account_data_type, content): @@ -373,7 +371,7 @@ class AccountDataStore(AccountDataWorkerStore): ) result = self._account_data_id_gen.get_current_token() - defer.returnValue(result) + return result def _update_max_stream_id(self, next_id): """Update the max stream_id diff 
--git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index eb329ebd8b..05d9c05c3f 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -145,7 +145,7 @@ class ApplicationServiceTransactionWorkerStore( for service in as_list: if service.id == res["as_id"]: services.append(service) - defer.returnValue(services) + return services @defer.inlineCallbacks def get_appservice_state(self, service): @@ -164,9 +164,9 @@ class ApplicationServiceTransactionWorkerStore( desc="get_appservice_state", ) if result: - defer.returnValue(result.get("state")) + return result.get("state") return - defer.returnValue(None) + return None def set_appservice_state(self, service, state): """Set the application service state. @@ -298,15 +298,13 @@ class ApplicationServiceTransactionWorkerStore( ) if not entry: - defer.returnValue(None) + return None event_ids = json.loads(entry["event_ids"]) events = yield self.get_events_as_list(event_ids) - defer.returnValue( - AppServiceTransaction(service=service, id=entry["txn_id"], events=events) - ) + return AppServiceTransaction(service=service, id=entry["txn_id"], events=events) def _get_last_txn(self, txn, service_id): txn.execute( @@ -360,7 +358,7 @@ class ApplicationServiceTransactionWorkerStore( events = yield self.get_events_as_list(event_ids) - defer.returnValue((upper_bound, events)) + return (upper_bound, events) class ApplicationServiceTransactionStore(ApplicationServiceTransactionWorkerStore): diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 50f913a414..e5f0668f09 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -115,7 +115,7 @@ class BackgroundUpdateStore(SQLBaseStore): " Unscheduling background update task." ) self._all_done = True - defer.returnValue(None) + return None @defer.inlineCallbacks def has_completed_background_updates(self): @@ -127,11 +127,11 @@ class BackgroundUpdateStore(SQLBaseStore): # if we've previously determined that there is nothing left to do, that # is easy if self._all_done: - defer.returnValue(True) + return True # obviously, if we have things in our queue, we're not done. if self._background_update_queue: - defer.returnValue(False) + return False # otherwise, check if there are updates to be run. This is important, # as we may be running on a worker which doesn't perform the bg updates @@ -144,9 +144,9 @@ class BackgroundUpdateStore(SQLBaseStore): ) if not updates: self._all_done = True - defer.returnValue(True) + return True - defer.returnValue(False) + return False @defer.inlineCallbacks def do_next_background_update(self, desired_duration_ms): @@ -173,14 +173,14 @@ class BackgroundUpdateStore(SQLBaseStore): if not self._background_update_queue: # no work left to do - defer.returnValue(None) + return None # pop from the front, and add back to the back update_name = self._background_update_queue.pop(0) self._background_update_queue.append(update_name) res = yield self._do_background_update(update_name, desired_duration_ms) - defer.returnValue(res) + return res @defer.inlineCallbacks def _do_background_update(self, update_name, desired_duration_ms): @@ -231,7 +231,7 @@ class BackgroundUpdateStore(SQLBaseStore): performance.update(items_updated, duration_ms) - defer.returnValue(len(self._background_update_performance)) + return len(self._background_update_performance) def register_background_update_handler(self, update_name, update_handler): """Register a handler for doing a background update. 
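The rewrite this series applies is mechanical: under Python 3 a generator may return a value directly (PEP 380), so Twisted's inlineCallbacks-decorated functions no longer need defer.returnValue. A minimal sketch of the before/after shape, assuming a hypothetical Deferred-returning store.fetch_count():

    from twisted.internet import defer

    @defer.inlineCallbacks
    def old_style(store):
        count = yield store.fetch_count()           # hypothetical Deferred-returning call
        defer.returnValue((200, {"count": count}))  # Python 2-compatible idiom

    @defer.inlineCallbacks
    def new_style(store):
        count = yield store.fetch_count()
        return (200, {"count": count})              # plain return; Python 3 only

Both versions resolve the returned Deferred with the same (200, {...}) tuple; only the way the value leaves the generator changes.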
@@ -266,7 +266,7 @@ class BackgroundUpdateStore(SQLBaseStore): @defer.inlineCallbacks def noop_update(progress, batch_size): yield self._end_background_update(update_name) - defer.returnValue(1) + return 1 self.register_background_update_handler(update_name, noop_update) @@ -370,7 +370,7 @@ class BackgroundUpdateStore(SQLBaseStore): logger.info("Adding index %s to %s", index_name, table) yield self.runWithConnection(runner) yield self._end_background_update(update_name) - defer.returnValue(1) + return 1 self.register_background_update_handler(update_name, updater) diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py index bda68de5be..6db8c54077 100644 --- a/synapse/storage/client_ips.py +++ b/synapse/storage/client_ips.py @@ -104,7 +104,7 @@ class ClientIpStore(background_updates.BackgroundUpdateStore): yield self.runWithConnection(f) yield self._end_background_update("user_ips_drop_nonunique_index") - defer.returnValue(1) + return 1 @defer.inlineCallbacks def _analyze_user_ip(self, progress, batch_size): @@ -121,7 +121,7 @@ class ClientIpStore(background_updates.BackgroundUpdateStore): yield self._end_background_update("user_ips_analyze") - defer.returnValue(1) + return 1 @defer.inlineCallbacks def _remove_user_ip_dupes(self, progress, batch_size): @@ -291,7 +291,7 @@ class ClientIpStore(background_updates.BackgroundUpdateStore): if last: yield self._end_background_update("user_ips_remove_dupes") - defer.returnValue(batch_size) + return batch_size @defer.inlineCallbacks def insert_client_ip( @@ -401,7 +401,7 @@ class ClientIpStore(background_updates.BackgroundUpdateStore): "device_id": did, "last_seen": last_seen, } - defer.returnValue(ret) + return ret @classmethod def _get_last_client_ip_by_device_txn(cls, txn, user_id, device_id, retcols): @@ -461,14 +461,12 @@ class ClientIpStore(background_updates.BackgroundUpdateStore): ((row["access_token"], row["ip"]), (row["user_agent"], row["last_seen"])) for row in rows ) - defer.returnValue( - list( - { - "access_token": access_token, - "ip": ip, - "user_agent": user_agent, - "last_seen": last_seen, - } - for (access_token, ip), (user_agent, last_seen) in iteritems(results) - ) + return list( + { + "access_token": access_token, + "ip": ip, + "user_agent": user_agent, + "last_seen": last_seen, + } + for (access_token, ip), (user_agent, last_seen) in iteritems(results) ) diff --git a/synapse/storage/deviceinbox.py b/synapse/storage/deviceinbox.py index 4ea0deea4f..79bb0ea46d 100644 --- a/synapse/storage/deviceinbox.py +++ b/synapse/storage/deviceinbox.py @@ -92,7 +92,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): user_id, last_deleted_stream_id ) if not has_changed: - defer.returnValue(0) + return 0 def delete_messages_for_device_txn(txn): sql = ( @@ -115,7 +115,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): last_deleted_stream_id, up_to_stream_id ) - defer.returnValue(count) + return count def get_new_device_msgs_for_remote( self, destination, last_stream_id, current_stream_id, limit @@ -263,7 +263,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, BackgroundUpdateStore): destination, stream_id ) - defer.returnValue(self._device_inbox_id_gen.get_current_token()) + return self._device_inbox_id_gen.get_current_token() @defer.inlineCallbacks def add_messages_from_remote_to_device_inbox( @@ -312,7 +312,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, BackgroundUpdateStore): for user_id in local_messages_by_user_then_device.keys(): self._device_inbox_stream_cache.entity_has_changed(user_id, stream_id) - 
defer.returnValue(stream_id) + return stream_id def _add_messages_to_local_device_inbox_txn( self, txn, stream_id, messages_by_user_then_device @@ -426,4 +426,4 @@ class DeviceInboxStore(DeviceInboxWorkerStore, BackgroundUpdateStore): yield self._end_background_update(self.DEVICE_INBOX_STREAM_ID) - defer.returnValue(1) + return 1 diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index d2b113a4e7..8f72d92895 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -71,7 +71,7 @@ class DeviceWorkerStore(SQLBaseStore): desc="get_devices_by_user", ) - defer.returnValue({d["device_id"]: d for d in devices}) + return {d["device_id"]: d for d in devices} @defer.inlineCallbacks def get_devices_by_remote(self, destination, from_stream_id, limit): @@ -88,7 +88,7 @@ class DeviceWorkerStore(SQLBaseStore): destination, int(from_stream_id) ) if not has_changed: - defer.returnValue((now_stream_id, [])) + return (now_stream_id, []) # We retrieve n+1 devices from the list of outbound pokes where n is # our outbound device update limit. We then check if the very last @@ -111,7 +111,7 @@ class DeviceWorkerStore(SQLBaseStore): # Return an empty list if there are no updates if not updates: - defer.returnValue((now_stream_id, [])) + return (now_stream_id, []) # if we have exceeded the limit, we need to exclude any results with the # same stream_id as the last row. @@ -147,13 +147,13 @@ class DeviceWorkerStore(SQLBaseStore): # skip that stream_id and return an empty list, and continue with the next # stream_id next time. if not query_map: - defer.returnValue((stream_id_cutoff, [])) + return (stream_id_cutoff, []) results = yield self._get_device_update_edus_by_remote( destination, from_stream_id, query_map ) - defer.returnValue((now_stream_id, results)) + return (now_stream_id, results) def _get_devices_by_remote_txn( self, txn, destination, from_stream_id, now_stream_id, limit @@ -232,7 +232,7 @@ class DeviceWorkerStore(SQLBaseStore): results.append(result) - defer.returnValue(results) + return results def _get_last_device_update_for_remote_user( self, destination, user_id, from_stream_id @@ -330,7 +330,7 @@ class DeviceWorkerStore(SQLBaseStore): else: results[user_id] = yield self._get_cached_devices_for_user(user_id) - defer.returnValue((user_ids_not_in_cache, results)) + return (user_ids_not_in_cache, results) @cachedInlineCallbacks(num_args=2, tree=True) def _get_cached_user_device(self, user_id, device_id): @@ -340,7 +340,7 @@ class DeviceWorkerStore(SQLBaseStore): retcol="content", desc="_get_cached_user_device", ) - defer.returnValue(db_to_json(content)) + return db_to_json(content) @cachedInlineCallbacks() def _get_cached_devices_for_user(self, user_id): @@ -350,9 +350,9 @@ class DeviceWorkerStore(SQLBaseStore): retcols=("device_id", "content"), desc="_get_cached_devices_for_user", ) - defer.returnValue( - {device["device_id"]: db_to_json(device["content"]) for device in devices} - ) + return { + device["device_id"]: db_to_json(device["content"]) for device in devices + } def get_devices_with_keys_by_user(self, user_id): """Get all devices (with any device keys) for a user @@ -482,7 +482,7 @@ class DeviceWorkerStore(SQLBaseStore): results = {user_id: None for user_id in user_ids} results.update({row["user_id"]: row["stream_id"] for row in rows}) - defer.returnValue(results) + return results class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore): @@ -543,7 +543,7 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore): """ key = (user_id, device_id) if 
self.device_id_exists_cache.get(key, None): - defer.returnValue(False) + return False try: inserted = yield self._simple_insert( @@ -557,7 +557,7 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore): or_ignore=True, ) self.device_id_exists_cache.prefill(key, True) - defer.returnValue(inserted) + return inserted except Exception as e: logger.error( "store_device with device_id=%s(%r) user_id=%s(%r)" @@ -780,7 +780,7 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore): hosts, stream_id, ) - defer.returnValue(stream_id) + return stream_id def _add_device_change_txn(self, txn, user_id, device_ids, hosts, stream_id): now = self._clock.time_msec() @@ -889,4 +889,4 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore): yield self.runWithConnection(f) yield self._end_background_update(DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES) - defer.returnValue(1) + return 1 diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py index 201bbd430c..e966a73f3d 100644 --- a/synapse/storage/directory.py +++ b/synapse/storage/directory.py @@ -46,7 +46,7 @@ class DirectoryWorkerStore(SQLBaseStore): ) if not room_id: - defer.returnValue(None) + return None return servers = yield self._simple_select_onecol( @@ -57,10 +57,10 @@ class DirectoryWorkerStore(SQLBaseStore): ) if not servers: - defer.returnValue(None) + return None return - defer.returnValue(RoomAliasMapping(room_id, room_alias.to_string(), servers)) + return RoomAliasMapping(room_id, room_alias.to_string(), servers) def get_room_alias_creator(self, room_alias): return self._simple_select_one_onecol( @@ -125,7 +125,7 @@ class DirectoryStore(DirectoryWorkerStore): raise SynapseError( 409, "Room alias %s already exists" % room_alias.to_string() ) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def delete_room_alias(self, room_alias): @@ -133,7 +133,7 @@ class DirectoryStore(DirectoryWorkerStore): "delete_room_alias", self._delete_room_alias_txn, room_alias ) - defer.returnValue(room_id) + return room_id def _delete_room_alias_txn(self, txn, room_alias): txn.execute( diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index f40ef2ab64..99128f2df7 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -61,7 +61,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): row["session_data"] = json.loads(row["session_data"]) - defer.returnValue(row) + return row @defer.inlineCallbacks def set_e2e_room_key(self, user_id, version, room_id, session_id, room_key): @@ -118,7 +118,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): try: version = int(version) except ValueError: - defer.returnValue({"rooms": {}}) + return {"rooms": {}} keyvalues = {"user_id": user_id, "version": version} if room_id: @@ -151,7 +151,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): "session_data": json.loads(row["session_data"]), } - defer.returnValue(sessions) + return sessions @defer.inlineCallbacks def delete_e2e_room_keys(self, user_id, version, room_id=None, session_id=None): diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py index 2fabb9e2cb..1e07474e70 100644 --- a/synapse/storage/end_to_end_keys.py +++ b/synapse/storage/end_to_end_keys.py @@ -41,7 +41,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore): dict containing "key_json", "device_display_name". 
""" if not query_list: - defer.returnValue({}) + return {} results = yield self.runInteraction( "get_e2e_device_keys", @@ -55,7 +55,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore): for device_id, device_info in iteritems(device_keys): device_info["keys"] = db_to_json(device_info.pop("key_json")) - defer.returnValue(results) + return results def _get_e2e_device_keys_txn( self, txn, query_list, include_all_devices=False, include_deleted_devices=False @@ -130,9 +130,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore): desc="add_e2e_one_time_keys_check", ) - defer.returnValue( - {(row["algorithm"], row["key_id"]): row["key_json"] for row in rows} - ) + return {(row["algorithm"], row["key_id"]): row["key_json"] for row in rows} @defer.inlineCallbacks def add_e2e_one_time_keys(self, user_id, device_id, time_now, new_keys): diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index cb4478342f..4f500d893e 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -131,9 +131,9 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas ) if not rows: - defer.returnValue(0) + return 0 else: - defer.returnValue(max(row["depth"] for row in rows)) + return max(row["depth"] for row in rows) def _get_oldest_events_in_room_txn(self, txn, room_id): return self._simple_select_onecol_txn( @@ -169,7 +169,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas # make sure that we don't completely ignore the older events. res = res[0:5] + random.sample(res[5:], 5) - defer.returnValue(res) + return res def get_latest_event_ids_and_hashes_in_room(self, room_id): """ @@ -411,7 +411,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas limit, ) events = yield self.get_events_as_list(ids) - defer.returnValue(events) + return events def _get_missing_events(self, txn, room_id, earliest_events, latest_events, limit): @@ -463,7 +463,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas desc="get_successor_events", ) - defer.returnValue([row["event_id"] for row in rows]) + return [row["event_id"] for row in rows] class EventFederationStore(EventFederationWorkerStore): @@ -654,4 +654,4 @@ class EventFederationStore(EventFederationWorkerStore): if not result: yield self._end_background_update(self.EVENT_AUTH_STATE_ONLY) - defer.returnValue(batch_size) + return batch_size diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index dcfb67e029..22025effbc 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -100,7 +100,7 @@ class EventPushActionsWorkerStore(SQLBaseStore): user_id, last_read_event_id, ) - defer.returnValue(ret) + return ret def _get_unread_counts_by_receipt_txn( self, txn, room_id, user_id, last_read_event_id @@ -178,7 +178,7 @@ class EventPushActionsWorkerStore(SQLBaseStore): return [r[0] for r in txn] ret = yield self.runInteraction("get_push_action_users_in_range", f) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def get_unread_push_actions_for_user_in_range_for_http( @@ -279,7 +279,7 @@ class EventPushActionsWorkerStore(SQLBaseStore): # Take only up to the limit. We have to stop at the limit because # one of the subqueries may have hit the limit. 
- defer.returnValue(notifs[:limit]) + return notifs[:limit] @defer.inlineCallbacks def get_unread_push_actions_for_user_in_range_for_email( @@ -380,7 +380,7 @@ class EventPushActionsWorkerStore(SQLBaseStore): notifs.sort(key=lambda r: -(r["received_ts"] or 0)) # Now return the first `limit` - defer.returnValue(notifs[:limit]) + return notifs[:limit] def get_if_maybe_push_in_range_for_user(self, user_id, min_stream_ordering): """A fast check to see if there might be something to push for the @@ -477,7 +477,7 @@ class EventPushActionsWorkerStore(SQLBaseStore): keyvalues={"event_id": event_id}, desc="remove_push_actions_from_staging", ) - defer.returnValue(res) + return res except Exception: # this method is called from an exception handler, so propagating # another exception here really isn't helpful - there's nothing @@ -732,7 +732,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore): push_actions = yield self.runInteraction("get_push_actions_for_user", f) for pa in push_actions: pa["actions"] = _deserialize_action(pa["actions"], pa["highlight"]) - defer.returnValue(push_actions) + return push_actions @defer.inlineCallbacks def get_time_of_last_push_action_before(self, stream_ordering): @@ -749,7 +749,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore): return txn.fetchone() result = yield self.runInteraction("get_time_of_last_push_action_before", f) - defer.returnValue(result[0] if result else None) + return result[0] if result else None @defer.inlineCallbacks def get_latest_push_action_stream_ordering(self): @@ -758,7 +758,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore): return txn.fetchone() result = yield self.runInteraction("get_latest_push_action_stream_ordering", f) - defer.returnValue(result[0] or 0) + return result[0] or 0 def _remove_push_actions_for_event_id_txn(self, txn, room_id, event_id): # Sad that we have to blow away the cache for the whole room here diff --git a/synapse/storage/events.py b/synapse/storage/events.py index b70457bfc6..88c0180116 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -223,7 +223,7 @@ def _retry_on_integrity_error(func): except self.database_engine.module.IntegrityError: logger.exception("IntegrityError, retrying.") res = yield func(self, *args, delete_existing=True, **kwargs) - defer.returnValue(res) + return res return f @@ -309,7 +309,7 @@ class EventsStore( max_persisted_id = yield self._stream_id_gen.get_current_token() - defer.returnValue(max_persisted_id) + return max_persisted_id @defer.inlineCallbacks @log_function @@ -334,7 +334,7 @@ class EventsStore( yield make_deferred_yieldable(deferred) max_persisted_id = yield self._stream_id_gen.get_current_token() - defer.returnValue((event.internal_metadata.stream_ordering, max_persisted_id)) + return (event.internal_metadata.stream_ordering, max_persisted_id) def _maybe_start_persisting(self, room_id): @defer.inlineCallbacks @@ -595,7 +595,7 @@ class EventsStore( stale = latest_event_ids & result stale_forward_extremities_counter.observe(len(stale)) - defer.returnValue(result) + return result @defer.inlineCallbacks def _get_events_which_are_prevs(self, event_ids): @@ -633,7 +633,7 @@ class EventsStore( "_get_events_which_are_prevs", _get_events_which_are_prevs_txn, chunk ) - defer.returnValue(results) + return results @defer.inlineCallbacks def _get_prevs_before_rejected(self, event_ids): @@ -695,7 +695,7 @@ class EventsStore( "_get_prevs_before_rejected", _get_prevs_before_rejected_txn, chunk ) - defer.returnValue(existing_prevs) + 
return existing_prevs @defer.inlineCallbacks def _get_new_state_after_events( @@ -796,7 +796,7 @@ class EventsStore( # If they old and new groups are the same then we don't need to do # anything. if old_state_groups == new_state_groups: - defer.returnValue((None, None)) + return (None, None) if len(new_state_groups) == 1 and len(old_state_groups) == 1: # If we're going from one state group to another, lets check if @@ -813,7 +813,7 @@ class EventsStore( # the current state in memory then lets also return that, # but it doesn't matter if we don't. new_state = state_groups_map.get(new_state_group) - defer.returnValue((new_state, delta_ids)) + return (new_state, delta_ids) # Now that we have calculated new_state_groups we need to get # their state IDs so we can resolve to a single state set. @@ -825,7 +825,7 @@ class EventsStore( if len(new_state_groups) == 1: # If there is only one state group, then we know what the current # state is. - defer.returnValue((state_groups_map[new_state_groups.pop()], None)) + return (state_groups_map[new_state_groups.pop()], None) # Ok, we need to defer to the state handler to resolve our state sets. @@ -854,7 +854,7 @@ class EventsStore( state_res_store=StateResolutionStore(self), ) - defer.returnValue((res.state, None)) + return (res.state, None) @defer.inlineCallbacks def _calculate_state_delta(self, room_id, current_state): @@ -877,7 +877,7 @@ class EventsStore( if ev_id != existing_state.get(key) } - defer.returnValue((to_delete, to_insert)) + return (to_delete, to_insert) @log_function def _persist_events_txn( @@ -1564,7 +1564,7 @@ class EventsStore( return count ret = yield self.runInteraction("count_messages", _count_messages) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def count_daily_sent_messages(self): @@ -1585,7 +1585,7 @@ class EventsStore( return count ret = yield self.runInteraction("count_daily_sent_messages", _count_messages) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def count_daily_active_rooms(self): @@ -1600,7 +1600,7 @@ class EventsStore( return count ret = yield self.runInteraction("count_daily_active_rooms", _count) - defer.returnValue(ret) + return ret def get_current_backfill_token(self): """The current minimum token that backfilled events have reached""" @@ -2183,7 +2183,7 @@ class EventsStore( """ to_1, so_1 = yield self._get_event_ordering(event_id1) to_2, so_2 = yield self._get_event_ordering(event_id2) - defer.returnValue((to_1, so_1) > (to_2, so_2)) + return (to_1, so_1) > (to_2, so_2) @cachedInlineCallbacks(max_entries=5000) def _get_event_ordering(self, event_id): @@ -2197,9 +2197,7 @@ class EventsStore( if not res: raise SynapseError(404, "Could not find event %s" % (event_id,)) - defer.returnValue( - (int(res["topological_ordering"]), int(res["stream_ordering"])) - ) + return (int(res["topological_ordering"]), int(res["stream_ordering"])) def get_all_updated_current_state_deltas(self, from_token, to_token, limit): def get_all_updated_current_state_deltas_txn(txn): diff --git a/synapse/storage/events_bg_updates.py b/synapse/storage/events_bg_updates.py index 1ce21d190c..6587f31e2b 100644 --- a/synapse/storage/events_bg_updates.py +++ b/synapse/storage/events_bg_updates.py @@ -135,7 +135,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): if not result: yield self._end_background_update(self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME) - defer.returnValue(result) + return result @defer.inlineCallbacks def _background_reindex_origin_server_ts(self, progress, batch_size): @@ -212,7 +212,7 
@@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): if not result: yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME) - defer.returnValue(result) + return result @defer.inlineCallbacks def _cleanup_extremities_bg_update(self, progress, batch_size): @@ -396,4 +396,4 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): "_cleanup_extremities_bg_update_drop_table", _drop_table_txn ) - defer.returnValue(num_handled) + return num_handled diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 858fc755a1..44441957db 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -157,7 +157,7 @@ class EventsWorkerStore(SQLBaseStore): if event is None and not allow_none: raise NotFoundError("Could not find event %s" % (event_id,)) - defer.returnValue(event) + return event @defer.inlineCallbacks def get_events( @@ -187,7 +187,7 @@ class EventsWorkerStore(SQLBaseStore): allow_rejected=allow_rejected, ) - defer.returnValue({e.event_id: e for e in events}) + return {e.event_id: e for e in events} @defer.inlineCallbacks def get_events_as_list( @@ -217,7 +217,7 @@ class EventsWorkerStore(SQLBaseStore): """ if not event_ids: - defer.returnValue([]) + return [] # there may be duplicates so we cast the list to a set event_entry_map = yield self._get_events_from_cache_or_db( @@ -305,7 +305,7 @@ class EventsWorkerStore(SQLBaseStore): event.unsigned["prev_content"] = prev.content event.unsigned["prev_sender"] = prev.sender - defer.returnValue(events) + return events @defer.inlineCallbacks def _get_events_from_cache_or_db(self, event_ids, allow_rejected=False): @@ -452,7 +452,7 @@ class EventsWorkerStore(SQLBaseStore): without having to create a new transaction for each request for events. """ if not events: - defer.returnValue({}) + return {} events_d = defer.Deferred() with self._event_fetch_lock: @@ -496,7 +496,7 @@ class EventsWorkerStore(SQLBaseStore): ) ) - defer.returnValue({e.event.event_id: e for e in res if e}) + return {e.event.event_id: e for e in res if e} def _fetch_event_rows(self, txn, event_ids): """Fetch event rows from the database @@ -609,7 +609,7 @@ class EventsWorkerStore(SQLBaseStore): self._get_event_cache.prefill((original_ev.event_id,), cache_entry) - defer.returnValue(cache_entry) + return cache_entry @defer.inlineCallbacks def _maybe_redact_event_row(self, original_ev, redactions): @@ -679,7 +679,7 @@ class EventsWorkerStore(SQLBaseStore): desc="have_events_in_timeline", ) - defer.returnValue(set(r["event_id"] for r in rows)) + return set(r["event_id"] for r in rows) @defer.inlineCallbacks def have_seen_events(self, event_ids): @@ -705,7 +705,7 @@ class EventsWorkerStore(SQLBaseStore): input_iterator = iter(event_ids) for chunk in iter(lambda: list(itertools.islice(input_iterator, 100)), []): yield self.runInteraction("have_seen_events", have_seen_events_txn, chunk) - defer.returnValue(results) + return results def get_seen_events_with_rejections(self, event_ids): """Given a list of event ids, check if we rejected them. @@ -816,4 +816,4 @@ class EventsWorkerStore(SQLBaseStore): # it. 
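# Why defer.returnValue existed at all: in Python 2, `return value` inside
# a generator was a SyntaxError. Since PEP 380 (Python 3.3) the value rides
# on StopIteration, which inlineCallbacks unwraps into the Deferred's
# result. A standalone illustration:
def _gen():
    yield 1
    return "done"

g = _gen()
assert next(g) == 1
try:
    next(g)
except StopIteration as stop:
    assert stop.value == "done"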
complexity_v1 = round(state_events / 500, 2) - defer.returnValue({"v1": complexity_v1}) + return {"v1": complexity_v1} diff --git a/synapse/storage/filtering.py b/synapse/storage/filtering.py index b195dc66a0..23b48f6cea 100644 --- a/synapse/storage/filtering.py +++ b/synapse/storage/filtering.py @@ -15,8 +15,6 @@ from canonicaljson import encode_canonical_json -from twisted.internet import defer - from synapse.api.errors import Codes, SynapseError from synapse.util.caches.descriptors import cachedInlineCallbacks @@ -41,7 +39,7 @@ class FilteringStore(SQLBaseStore): desc="get_user_filter", ) - defer.returnValue(db_to_json(def_json)) + return db_to_json(def_json) def add_user_filter(self, user_localpart, user_filter): def_json = encode_canonical_json(user_filter) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 73e6fc6de2..15b01c6958 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -307,15 +307,13 @@ class GroupServerStore(SQLBaseStore): desc="get_group_categories", ) - defer.returnValue( - { - row["category_id"]: { - "is_public": row["is_public"], - "profile": json.loads(row["profile"]), - } - for row in rows + return { + row["category_id"]: { + "is_public": row["is_public"], + "profile": json.loads(row["profile"]), } - ) + for row in rows + } @defer.inlineCallbacks def get_group_category(self, group_id, category_id): @@ -328,7 +326,7 @@ class GroupServerStore(SQLBaseStore): category["profile"] = json.loads(category["profile"]) - defer.returnValue(category) + return category def upsert_group_category(self, group_id, category_id, profile, is_public): """Add/update room category for group @@ -370,15 +368,13 @@ class GroupServerStore(SQLBaseStore): desc="get_group_roles", ) - defer.returnValue( - { - row["role_id"]: { - "is_public": row["is_public"], - "profile": json.loads(row["profile"]), - } - for row in rows + return { + row["role_id"]: { + "is_public": row["is_public"], + "profile": json.loads(row["profile"]), } - ) + for row in rows + } @defer.inlineCallbacks def get_group_role(self, group_id, role_id): @@ -391,7 +387,7 @@ class GroupServerStore(SQLBaseStore): role["profile"] = json.loads(role["profile"]) - defer.returnValue(role) + return role def upsert_group_role(self, group_id, role_id, profile, is_public): """Add/remove user role @@ -960,7 +956,7 @@ class GroupServerStore(SQLBaseStore): _register_user_group_membership_txn, next_id, ) - defer.returnValue(res) + return res @defer.inlineCallbacks def create_group( @@ -1057,9 +1053,9 @@ class GroupServerStore(SQLBaseStore): now = int(self._clock.time_msec()) if row and now < row["valid_until_ms"]: - defer.returnValue(json.loads(row["attestation_json"])) + return json.loads(row["attestation_json"]) - defer.returnValue(None) + return None def get_joined_groups(self, user_id): return self._simple_select_onecol( diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index 081564360f..752e9788a2 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -173,7 +173,7 @@ class MonthlyActiveUsersStore(SQLBaseStore): ) if user_id: count = count + 1 - defer.returnValue(count) + return count @defer.inlineCallbacks def upsert_monthly_active_user(self, user_id): diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py index 42ec8c6bb8..1a0f2d5768 100644 --- a/synapse/storage/presence.py +++ b/synapse/storage/presence.py @@ -90,9 +90,7 @@ class PresenceStore(SQLBaseStore): 
presence_states, ) - defer.returnValue( - (stream_orderings[-1], self._presence_id_gen.get_current_token()) - ) + return (stream_orderings[-1], self._presence_id_gen.get_current_token()) def _update_presence_txn(self, txn, stream_orderings, presence_states): for stream_id, state in zip(stream_orderings, presence_states): @@ -180,7 +178,7 @@ class PresenceStore(SQLBaseStore): for row in rows: row["currently_active"] = bool(row["currently_active"]) - defer.returnValue({row["user_id"]: UserPresenceState(**row) for row in rows}) + return {row["user_id"]: UserPresenceState(**row) for row in rows} def get_current_presence_token(self): return self._presence_id_gen.get_current_token() diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py index 0ff392bdb4..8a5d8e9b18 100644 --- a/synapse/storage/profile.py +++ b/synapse/storage/profile.py @@ -34,15 +34,13 @@ class ProfileWorkerStore(SQLBaseStore): except StoreError as e: if e.code == 404: # no match - defer.returnValue(ProfileInfo(None, None)) + return ProfileInfo(None, None) return else: raise - defer.returnValue( - ProfileInfo( - avatar_url=profile["avatar_url"], display_name=profile["displayname"] - ) + return ProfileInfo( + avatar_url=profile["avatar_url"], display_name=profile["displayname"] ) def get_profile_displayname(self, user_localpart): @@ -168,7 +166,7 @@ class ProfileStore(ProfileWorkerStore): ) if res: - defer.returnValue(True) + return True res = yield self._simple_select_one_onecol( table="group_invites", @@ -179,4 +177,4 @@ class ProfileStore(ProfileWorkerStore): ) if res: - defer.returnValue(True) + return True diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 98cec8c82b..a6517c4cf3 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -120,7 +120,7 @@ class PushRulesWorkerStore( rules = _load_rules(rows, enabled_map) - defer.returnValue(rules) + return rules @cachedInlineCallbacks(max_entries=5000) def get_push_rules_enabled_for_user(self, user_id): @@ -130,9 +130,7 @@ class PushRulesWorkerStore( retcols=("user_name", "rule_id", "enabled"), desc="get_push_rules_enabled_for_user", ) - defer.returnValue( - {r["rule_id"]: False if r["enabled"] == 0 else True for r in results} - ) + return {r["rule_id"]: False if r["enabled"] == 0 else True for r in results} def have_push_rules_changed_for_user(self, user_id, last_id): if not self.push_rules_stream_cache.has_entity_changed(user_id, last_id): @@ -160,7 +158,7 @@ class PushRulesWorkerStore( ) def bulk_get_push_rules(self, user_ids): if not user_ids: - defer.returnValue({}) + return {} results = {user_id: [] for user_id in user_ids} @@ -182,7 +180,7 @@ class PushRulesWorkerStore( for user_id, rules in results.items(): results[user_id] = _load_rules(rules, enabled_map_by_user.get(user_id, {})) - defer.returnValue(results) + return results @defer.inlineCallbacks def move_push_rule_from_room_to_room(self, new_room_id, user_id, rule): @@ -253,7 +251,7 @@ class PushRulesWorkerStore( result = yield self._bulk_get_push_rules_for_room( event.room_id, state_group, current_state_ids, event=event ) - defer.returnValue(result) + return result @cachedInlineCallbacks(num_args=2, cache_context=True) def _bulk_get_push_rules_for_room( @@ -312,7 +310,7 @@ class PushRulesWorkerStore( rules_by_user = {k: v for k, v in rules_by_user.items() if v is not None} - defer.returnValue(rules_by_user) + return rules_by_user @cachedList( cached_method_name="get_push_rules_enabled_for_user", @@ -322,7 +320,7 @@ class PushRulesWorkerStore( ) def 
bulk_get_push_rules_enabled(self, user_ids): if not user_ids: - defer.returnValue({}) + return {} results = {user_id: {} for user_id in user_ids} @@ -336,7 +334,7 @@ class PushRulesWorkerStore( for row in rows: enabled = bool(row["enabled"]) results.setdefault(row["user_name"], {})[row["rule_id"]] = enabled - defer.returnValue(results) + return results class PushRuleStore(PushRulesWorkerStore): diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index cfe0a94330..be3d4d9ded 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -63,7 +63,7 @@ class PusherWorkerStore(SQLBaseStore): ret = yield self._simple_select_one_onecol( "pushers", {"user_name": user_id}, "id", allow_none=True ) - defer.returnValue(ret is not None) + return ret is not None def get_pushers_by_app_id_and_pushkey(self, app_id, pushkey): return self.get_pushers_by({"app_id": app_id, "pushkey": pushkey}) @@ -95,7 +95,7 @@ class PusherWorkerStore(SQLBaseStore): ], desc="get_pushers_by", ) - defer.returnValue(self._decode_pushers_rows(ret)) + return self._decode_pushers_rows(ret) @defer.inlineCallbacks def get_all_pushers(self): @@ -106,7 +106,7 @@ class PusherWorkerStore(SQLBaseStore): return self._decode_pushers_rows(rows) rows = yield self.runInteraction("get_all_pushers", get_pushers) - defer.returnValue(rows) + return rows def get_all_updated_pushers(self, last_id, current_id, limit): if last_id == current_id: @@ -205,7 +205,7 @@ class PusherWorkerStore(SQLBaseStore): result = {user_id: False for user_id in user_ids} result.update({r["user_name"]: True for r in rows}) - defer.returnValue(result) + return result class PusherStore(PusherWorkerStore): @@ -343,7 +343,7 @@ class PusherStore(PusherWorkerStore): "throttle_ms": row["throttle_ms"], } - defer.returnValue(params_by_room) + return params_by_room @defer.inlineCallbacks def set_throttle_params(self, pusher_id, room_id, params): diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index b477da12b1..6aa6d98ebb 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -58,7 +58,7 @@ class ReceiptsWorkerStore(SQLBaseStore): @cachedInlineCallbacks() def get_users_with_read_receipts_in_room(self, room_id): receipts = yield self.get_receipts_for_room(room_id, "m.read") - defer.returnValue(set(r["user_id"] for r in receipts)) + return set(r["user_id"] for r in receipts) @cached(num_args=2) def get_receipts_for_room(self, room_id, receipt_type): @@ -92,7 +92,7 @@ class ReceiptsWorkerStore(SQLBaseStore): desc="get_receipts_for_user", ) - defer.returnValue({row["room_id"]: row["event_id"] for row in rows}) + return {row["room_id"]: row["event_id"] for row in rows} @defer.inlineCallbacks def get_receipts_for_user_with_orderings(self, user_id, receipt_type): @@ -110,16 +110,14 @@ class ReceiptsWorkerStore(SQLBaseStore): return txn.fetchall() rows = yield self.runInteraction("get_receipts_for_user_with_orderings", f) - defer.returnValue( - { - row[0]: { - "event_id": row[1], - "topological_ordering": row[2], - "stream_ordering": row[3], - } - for row in rows + return { + row[0]: { + "event_id": row[1], + "topological_ordering": row[2], + "stream_ordering": row[3], } - ) + for row in rows + } @defer.inlineCallbacks def get_linearized_receipts_for_rooms(self, room_ids, to_key, from_key=None): @@ -147,7 +145,7 @@ class ReceiptsWorkerStore(SQLBaseStore): room_ids, to_key, from_key=from_key ) - defer.returnValue([ev for res in results.values() for ev in res]) + return [ev for res in results.values() for ev in res] 
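The mechanical rewrite running through this patch is enabled by PEP 380: since Python 3.3 a generator may execute a bare "return value", which @defer.inlineCallbacks picks up from StopIteration.value, making Twisted's Python 2 workaround defer.returnValue() redundant. A minimal runnable sketch of the before/after shape (illustrative only; the names below are not from this patch):

    from twisted.internet import defer, task

    @defer.inlineCallbacks
    def double(d):
        value = yield d
        # Python 2 generators could not "return value", so Twisted provided
        # defer.returnValue(value * 2); on Python 3 a plain return suffices
        # and inlineCallbacks unwraps it from StopIteration.value.
        return value * 2

    def main(reactor):
        # double() hands back a Deferred that fires with 42.
        return double(defer.succeed(21)).addCallback(print)

    # task.react(main)  # run under a real reactor to print "42"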
def get_linearized_receipts_for_room(self, room_id, to_key, from_key=None): """Get receipts for a single room for sending to clients. @@ -197,7 +195,7 @@ class ReceiptsWorkerStore(SQLBaseStore): rows = yield self.runInteraction("get_linearized_receipts_for_room", f) if not rows: - defer.returnValue([]) + return [] content = {} for row in rows: @@ -205,9 +203,7 @@ class ReceiptsWorkerStore(SQLBaseStore): row["user_id"] ] = json.loads(row["data"]) - defer.returnValue( - [{"type": "m.receipt", "room_id": room_id, "content": content}] - ) + return [{"type": "m.receipt", "room_id": room_id, "content": content}] @cachedList( cached_method_name="_get_linearized_receipts_for_room", @@ -217,7 +213,7 @@ class ReceiptsWorkerStore(SQLBaseStore): ) def _get_linearized_receipts_for_rooms(self, room_ids, to_key, from_key=None): if not room_ids: - defer.returnValue({}) + return {} def f(txn): if from_key: @@ -264,7 +260,7 @@ class ReceiptsWorkerStore(SQLBaseStore): room_id: [results[room_id]] if room_id in results else [] for room_id in room_ids } - defer.returnValue(results) + return results def get_all_updated_receipts(self, last_id, current_id, limit=None): if last_id == current_id: @@ -468,7 +464,7 @@ class ReceiptsStore(ReceiptsWorkerStore): ) if event_ts is None: - defer.returnValue(None) + return None now = self._clock.time_msec() logger.debug( @@ -482,7 +478,7 @@ class ReceiptsStore(ReceiptsWorkerStore): max_persisted_id = self._receipts_id_gen.get_current_token() - defer.returnValue((stream_id, max_persisted_id)) + return (stream_id, max_persisted_id) def insert_graph_receipt(self, room_id, receipt_type, user_id, event_ids, data): return self.runInteraction( diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 8b2c2a97ab..999c10a308 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -75,12 +75,12 @@ class RegistrationWorkerStore(SQLBaseStore): info = yield self.get_user_by_id(user_id) if not info: - defer.returnValue(False) + return False now = self.clock.time_msec() trial_duration_ms = self.config.mau_trial_days * 24 * 60 * 60 * 1000 is_trial = (now - info["creation_ts"] * 1000) < trial_duration_ms - defer.returnValue(is_trial) + return is_trial @cached() def get_user_by_access_token(self, token): @@ -115,7 +115,7 @@ class RegistrationWorkerStore(SQLBaseStore): allow_none=True, desc="get_expiration_ts_for_user", ) - defer.returnValue(res) + return res @defer.inlineCallbacks def set_account_validity_for_user( @@ -190,7 +190,7 @@ class RegistrationWorkerStore(SQLBaseStore): desc="get_user_from_renewal_token", ) - defer.returnValue(res) + return res @defer.inlineCallbacks def get_renewal_token_for_user(self, user_id): @@ -209,7 +209,7 @@ class RegistrationWorkerStore(SQLBaseStore): desc="get_renewal_token_for_user", ) - defer.returnValue(res) + return res @defer.inlineCallbacks def get_users_expiring_soon(self): @@ -237,7 +237,7 @@ class RegistrationWorkerStore(SQLBaseStore): self.config.account_validity.renew_at, ) - defer.returnValue(res) + return res @defer.inlineCallbacks def set_renewal_mail_status(self, user_id, email_sent): @@ -280,7 +280,7 @@ class RegistrationWorkerStore(SQLBaseStore): desc="is_server_admin", ) - defer.returnValue(res if res else False) + return res if res else False def _query_for_auth(self, txn, token): sql = ( @@ -311,7 +311,7 @@ class RegistrationWorkerStore(SQLBaseStore): res = yield self.runInteraction( "is_support_user", self.is_support_user_txn, user_id ) - defer.returnValue(res) + return res def 
is_support_user_txn(self, txn, user_id): res = self._simple_select_one_onecol_txn( @@ -349,7 +349,7 @@ class RegistrationWorkerStore(SQLBaseStore): return 0 ret = yield self.runInteraction("count_users", _count_users) - defer.returnValue(ret) + return ret def count_daily_user_type(self): """ @@ -395,7 +395,7 @@ class RegistrationWorkerStore(SQLBaseStore): return count ret = yield self.runInteraction("count_users", _count_users) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def find_next_generated_user_id_localpart(self): @@ -425,7 +425,7 @@ class RegistrationWorkerStore(SQLBaseStore): if i not in found: return i - defer.returnValue( + return ( ( yield self.runInteraction( "find_next_generated_user_id", _find_next_generated_user_id @@ -447,7 +447,7 @@ class RegistrationWorkerStore(SQLBaseStore): user_id = yield self.runInteraction( "get_user_id_by_threepid", self.get_user_id_by_threepid_txn, medium, address ) - defer.returnValue(user_id) + return user_id def get_user_id_by_threepid_txn(self, txn, medium, address): """Returns user id from threepid @@ -487,7 +487,7 @@ class RegistrationWorkerStore(SQLBaseStore): ["medium", "address", "validated_at", "added_at"], "user_get_threepids", ) - defer.returnValue(ret) + return ret def user_delete_threepid(self, user_id, medium, address): return self._simple_delete( @@ -677,7 +677,7 @@ class RegistrationStore( if end: yield self._end_background_update("users_set_deactivated_flag") - defer.returnValue(batch_size) + return batch_size @defer.inlineCallbacks def add_access_token_to_user(self, user_id, token, device_id, valid_until_ms): @@ -957,7 +957,7 @@ class RegistrationStore( desc="is_guest", ) - defer.returnValue(res if res else False) + return res if res else False def add_user_pending_deactivation(self, user_id): """ @@ -1024,7 +1024,7 @@ class RegistrationStore( yield self._end_background_update("user_threepids_grandfather") - defer.returnValue(1) + return 1 def get_threepid_validation_session( self, medium, client_secret, address=None, sid=None, validated=True @@ -1337,4 +1337,4 @@ class RegistrationStore( ) # Convert the integer into a boolean. 
- defer.returnValue(res == 1) + return res == 1 diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index 9954bc094f..fcb5f2f23a 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -17,8 +17,6 @@ import logging import attr -from twisted.internet import defer - from synapse.api.constants import RelationTypes from synapse.api.errors import SynapseError from synapse.storage._base import SQLBaseStore @@ -363,7 +361,7 @@ class RelationsWorkerStore(SQLBaseStore): return edit_event = yield self.get_event(edit_id, allow_none=True) - defer.returnValue(edit_event) + return edit_event def has_user_annotated_event(self, parent_id, event_type, aggregation_key, sender): """Check if a user has already annotated an event with the same key diff --git a/synapse/storage/room.py b/synapse/storage/room.py index fe9d79d792..bc606292b8 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -193,14 +193,12 @@ class RoomWorkerStore(SQLBaseStore): ) if row: - defer.returnValue( - RatelimitOverride( - messages_per_second=row["messages_per_second"], - burst_count=row["burst_count"], - ) + return RatelimitOverride( + messages_per_second=row["messages_per_second"], + burst_count=row["burst_count"], ) else: - defer.returnValue(None) + return None class RoomStore(RoomWorkerStore, SearchStore): diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index b3c002b9eb..cb88e49b51 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -108,7 +108,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): room_id, on_invalidate=cache_context.invalidate ) hosts = frozenset(get_domain_from_id(user_id) for user_id in user_ids) - defer.returnValue(hosts) + return hosts @cached(max_entries=100000, iterable=True) def get_users_in_room(self, room_id): @@ -253,8 +253,8 @@ class RoomMemberWorkerStore(EventsWorkerStore): invites = yield self.get_invited_rooms_for_user(user_id) for invite in invites: if invite.room_id == room_id: - defer.returnValue(invite) - defer.returnValue(None) + return invite + return None def get_rooms_for_user_where_membership_is(self, user_id, membership_list): """ Get all the rooms for this user where the membership for this user @@ -347,11 +347,9 @@ class RoomMemberWorkerStore(EventsWorkerStore): rooms = yield self.get_rooms_for_user_where_membership_is( user_id, membership_list=[Membership.JOIN] ) - defer.returnValue( - frozenset( - GetRoomsForUserWithStreamOrdering(r.room_id, r.stream_ordering) - for r in rooms - ) + return frozenset( + GetRoomsForUserWithStreamOrdering(r.room_id, r.stream_ordering) + for r in rooms ) @defer.inlineCallbacks @@ -361,7 +359,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): rooms = yield self.get_rooms_for_user_with_stream_ordering( user_id, on_invalidate=on_invalidate ) - defer.returnValue(frozenset(r.room_id for r in rooms)) + return frozenset(r.room_id for r in rooms) @cachedInlineCallbacks(max_entries=500000, cache_context=True, iterable=True) def get_users_who_share_room_with_user(self, user_id, cache_context): @@ -378,7 +376,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): ) user_who_share_room.update(user_ids) - defer.returnValue(user_who_share_room) + return user_who_share_room @defer.inlineCallbacks def get_joined_users_from_context(self, event, context): @@ -394,7 +392,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): result = yield self._get_joined_users_from_context( event.room_id, state_group, current_state_ids, event=event, context=context ) - 
defer.returnValue(result) + return result def get_joined_users_from_state(self, room_id, state_entry): state_group = state_entry.state_group @@ -508,7 +506,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): avatar_url=to_ascii(event.content.get("avatar_url", None)), ) - defer.returnValue(users_in_room) + return users_in_room @cachedInlineCallbacks(max_entries=10000) def is_host_joined(self, room_id, host): @@ -533,14 +531,14 @@ class RoomMemberWorkerStore(EventsWorkerStore): rows = yield self._execute("is_host_joined", None, sql, room_id, like_clause) if not rows: - defer.returnValue(False) + return False user_id = rows[0][0] if get_domain_from_id(user_id) != host: # This can only happen if the host name has something funky in it raise Exception("Invalid host name") - defer.returnValue(True) + return True @cachedInlineCallbacks() def was_host_joined(self, room_id, host): @@ -573,14 +571,14 @@ class RoomMemberWorkerStore(EventsWorkerStore): rows = yield self._execute("was_host_joined", None, sql, room_id, like_clause) if not rows: - defer.returnValue(False) + return False user_id = rows[0][0] if get_domain_from_id(user_id) != host: # This can only happen if the host name has something funky in it raise Exception("Invalid host name") - defer.returnValue(True) + return True def get_joined_hosts(self, room_id, state_entry): state_group = state_entry.state_group @@ -607,7 +605,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): cache = self._get_joined_hosts_cache(room_id) joined_hosts = yield cache.get_destinations(state_entry) - defer.returnValue(joined_hosts) + return joined_hosts @cached(max_entries=10000) def _get_joined_hosts_cache(self, room_id): @@ -637,7 +635,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): return rows[0][0] count = yield self.runInteraction("did_forget_membership", f) - defer.returnValue(count == 0) + return count == 0 @defer.inlineCallbacks def get_rooms_user_has_been_in(self, user_id): @@ -847,7 +845,7 @@ class RoomMemberStore(RoomMemberWorkerStore): if not result: yield self._end_background_update(_MEMBERSHIP_PROFILE_UPDATE_NAME) - defer.returnValue(result) + return result @defer.inlineCallbacks def _background_current_state_membership(self, progress, batch_size): @@ -905,7 +903,7 @@ class RoomMemberStore(RoomMemberWorkerStore): if finished: yield self._end_background_update(_CURRENT_STATE_MEMBERSHIP_UPDATE_NAME) - defer.returnValue(row_count) + return row_count class _JoinedHostsCache(object): @@ -933,7 +931,7 @@ class _JoinedHostsCache(object): state_entry(synapse.state._StateCacheEntry) """ if state_entry.state_group == self.state_group: - defer.returnValue(frozenset(self.hosts_to_joined_users)) + return frozenset(self.hosts_to_joined_users) with (yield self.linearizer.queue(())): if state_entry.state_group == self.state_group: @@ -970,7 +968,7 @@ class _JoinedHostsCache(object): else: self.state_group = object() self._len = sum(len(v) for v in itervalues(self.hosts_to_joined_users)) - defer.returnValue(frozenset(self.hosts_to_joined_users)) + return frozenset(self.hosts_to_joined_users) def __len__(self): return self._len diff --git a/synapse/storage/search.py b/synapse/storage/search.py index f3b1cec933..df87ab6a6d 100644 --- a/synapse/storage/search.py +++ b/synapse/storage/search.py @@ -166,7 +166,7 @@ class SearchStore(BackgroundUpdateStore): if not result: yield self._end_background_update(self.EVENT_SEARCH_UPDATE_NAME) - defer.returnValue(result) + return result @defer.inlineCallbacks def _background_reindex_gin_search(self, progress, batch_size): 
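Note that the conversion is not limited to tail positions: early exits that previously had to spell defer.returnValue(x), because "return x" was a SyntaxError inside a Python 2 generator, become ordinary return statements, as in the is_host_joined and was_host_joined changes above. A hedged sketch of that early-exit shape (names and logic are illustrative, not the real storage method):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def is_host_joined_sketch(rows_deferred, host):
        rows = yield rows_deferred
        if not rows:
            # Early exit: previously defer.returnValue(False).
            return False
        # Fall-through path returns a computed value the same way;
        # Matrix user IDs look like "@local:host", hence the suffix check.
        return rows[0].endswith(":" + host)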
@@ -209,7 +209,7 @@ class SearchStore(BackgroundUpdateStore): yield self.runWithConnection(create_index) yield self._end_background_update(self.EVENT_SEARCH_USE_GIN_POSTGRES_NAME) - defer.returnValue(1) + return 1 @defer.inlineCallbacks def _background_reindex_search_order(self, progress, batch_size): @@ -287,7 +287,7 @@ class SearchStore(BackgroundUpdateStore): if not finished: yield self._end_background_update(self.EVENT_SEARCH_ORDER_UPDATE_NAME) - defer.returnValue(num_rows) + return num_rows def store_event_search_txn(self, txn, event, key, value): """Add event to the search table @@ -454,17 +454,15 @@ class SearchStore(BackgroundUpdateStore): count = sum(row["count"] for row in count_results if row["room_id"] in room_ids) - defer.returnValue( - { - "results": [ - {"event": event_map[r["event_id"]], "rank": r["rank"]} - for r in results - if r["event_id"] in event_map - ], - "highlights": highlights, - "count": count, - } - ) + return { + "results": [ + {"event": event_map[r["event_id"]], "rank": r["rank"]} + for r in results + if r["event_id"] in event_map + ], + "highlights": highlights, + "count": count, + } @defer.inlineCallbacks def search_rooms(self, room_ids, search_term, keys, limit, pagination_token=None): @@ -599,22 +597,20 @@ class SearchStore(BackgroundUpdateStore): count = sum(row["count"] for row in count_results if row["room_id"] in room_ids) - defer.returnValue( - { - "results": [ - { - "event": event_map[r["event_id"]], - "rank": r["rank"], - "pagination_token": "%s,%s" - % (r["origin_server_ts"], r["stream_ordering"]), - } - for r in results - if r["event_id"] in event_map - ], - "highlights": highlights, - "count": count, - } - ) + return { + "results": [ + { + "event": event_map[r["event_id"]], + "rank": r["rank"], + "pagination_token": "%s,%s" + % (r["origin_server_ts"], r["stream_ordering"]), + } + for r in results + if r["event_id"] in event_map + ], + "highlights": highlights, + "count": count, + } def _find_highlights_in_postgres(self, search_query, events): """Given a list of events and a search term, return a list of words diff --git a/synapse/storage/signatures.py b/synapse/storage/signatures.py index 6bd81e84ad..fb83218f90 100644 --- a/synapse/storage/signatures.py +++ b/synapse/storage/signatures.py @@ -59,7 +59,7 @@ class SignatureWorkerStore(SQLBaseStore): for e_id, h in hashes.items() } - defer.returnValue(list(hashes.items())) + return list(hashes.items()) def _get_event_reference_hashes_txn(self, txn, event_id): """Get all the hashes for a given PDU. 
diff --git a/synapse/storage/state.py b/synapse/storage/state.py index a35289876d..1980a87108 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -422,7 +422,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): # Retrieve the room's create event create_event = yield self.get_create_event_for_room(room_id) - defer.returnValue(create_event.content.get("room_version", "1")) + return create_event.content.get("room_version", "1") @defer.inlineCallbacks def get_room_predecessor(self, room_id): @@ -442,7 +442,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): create_event = yield self.get_create_event_for_room(room_id) # Return predecessor if present - defer.returnValue(create_event.content.get("predecessor", None)) + return create_event.content.get("predecessor", None) @defer.inlineCallbacks def get_create_event_for_room(self, room_id): @@ -466,7 +466,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): # Retrieve the room's create event and return create_event = yield self.get_event(create_id) - defer.returnValue(create_event) + return create_event @cached(max_entries=100000, iterable=True) def get_current_state_ids(self, room_id): @@ -563,7 +563,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): if not event: return - defer.returnValue(event.content.get("canonical_alias")) + return event.content.get("canonical_alias") @cached(max_entries=10000, iterable=True) def get_state_group_delta(self, state_group): @@ -613,14 +613,14 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): dict of state_group_id -> (dict of (type, state_key) -> event id) """ if not event_ids: - defer.returnValue({}) + return {} event_to_groups = yield self._get_state_group_for_events(event_ids) groups = set(itervalues(event_to_groups)) group_to_state = yield self._get_state_for_groups(groups) - defer.returnValue(group_to_state) + return group_to_state @defer.inlineCallbacks def get_state_ids_for_group(self, state_group): @@ -634,7 +634,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): """ group_to_state = yield self._get_state_for_groups((state_group,)) - defer.returnValue(group_to_state[state_group]) + return group_to_state[state_group] @defer.inlineCallbacks def get_state_groups(self, room_id, event_ids): @@ -645,7 +645,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): dict of state_group_id -> list of state events. 
""" if not event_ids: - defer.returnValue({}) + return {} group_to_ids = yield self.get_state_groups_ids(room_id, event_ids) @@ -658,16 +658,14 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): get_prev_content=False, ) - defer.returnValue( - { - group: [ - state_event_map[v] - for v in itervalues(event_id_map) - if v in state_event_map - ] - for group, event_id_map in iteritems(group_to_ids) - } - ) + return { + group: [ + state_event_map[v] + for v in itervalues(event_id_map) + if v in state_event_map + ] + for group, event_id_map in iteritems(group_to_ids) + } @defer.inlineCallbacks def _get_state_groups_from_groups(self, groups, state_filter): @@ -694,7 +692,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): ) results.update(res) - defer.returnValue(results) + return results def _get_state_groups_from_groups_txn( self, txn, groups, state_filter=StateFilter.all() @@ -829,7 +827,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): for event_id, group in iteritems(event_to_groups) } - defer.returnValue({event: event_to_state[event] for event in event_ids}) + return {event: event_to_state[event] for event in event_ids} @defer.inlineCallbacks def get_state_ids_for_events(self, event_ids, state_filter=StateFilter.all()): @@ -855,7 +853,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): for event_id, group in iteritems(event_to_groups) } - defer.returnValue({event: event_to_state[event] for event in event_ids}) + return {event: event_to_state[event] for event in event_ids} @defer.inlineCallbacks def get_state_for_event(self, event_id, state_filter=StateFilter.all()): @@ -871,7 +869,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): A deferred dict from (type, state_key) -> state_event """ state_map = yield self.get_state_for_events([event_id], state_filter) - defer.returnValue(state_map[event_id]) + return state_map[event_id] @defer.inlineCallbacks def get_state_ids_for_event(self, event_id, state_filter=StateFilter.all()): @@ -887,7 +885,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): A deferred dict from (type, state_key) -> state_event """ state_map = yield self.get_state_ids_for_events([event_id], state_filter) - defer.returnValue(state_map[event_id]) + return state_map[event_id] @cached(max_entries=50000) def _get_state_group_for_event(self, event_id): @@ -917,7 +915,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): desc="_get_state_group_for_events", ) - defer.returnValue({row["event_id"]: row["state_group"] for row in rows}) + return {row["event_id"]: row["state_group"] for row in rows} def _get_state_for_group_using_cache(self, cache, group, state_filter): """Checks if group is in cache. See `_get_state_for_groups` @@ -997,7 +995,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): incomplete_groups = incomplete_groups_m | incomplete_groups_nm if not incomplete_groups: - defer.returnValue(state) + return state cache_sequence_nm = self._state_group_cache.sequence cache_sequence_m = self._state_group_members_cache.sequence @@ -1024,7 +1022,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): # everything we need from the database anyway. 
state[group] = state_filter.filter_state(group_state_dict) - defer.returnValue(state) + return state def _get_state_for_groups_using_cache(self, groups, cache, state_filter): """Gets the state at each of a list of state groups, optionally @@ -1498,7 +1496,7 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME ) - defer.returnValue(result * BATCH_SIZE_SCALE_FACTOR) + return result * BATCH_SIZE_SCALE_FACTOR @defer.inlineCallbacks def _background_index_state(self, progress, batch_size): @@ -1528,4 +1526,4 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): yield self._end_background_update(self.STATE_GROUP_INDEX_UPDATE_NAME) - defer.returnValue(1) + return 1 diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index 1cec84ee2e..e893b05ee7 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -66,7 +66,7 @@ class StatsStore(StateDeltasStore): if not self.stats_enabled: yield self._end_background_update("populate_stats_createtables") - defer.returnValue(1) + return 1 # Get all the rooms that we want to process. def _make_staging_area(txn): @@ -120,7 +120,7 @@ class StatsStore(StateDeltasStore): self.get_earliest_token_for_room_stats.invalidate_all() yield self._end_background_update("populate_stats_createtables") - defer.returnValue(1) + return 1 @defer.inlineCallbacks def _populate_stats_cleanup(self, progress, batch_size): @@ -129,7 +129,7 @@ class StatsStore(StateDeltasStore): """ if not self.stats_enabled: yield self._end_background_update("populate_stats_cleanup") - defer.returnValue(1) + return 1 position = yield self._simple_select_one_onecol( TEMP_TABLE + "_position", None, "position" @@ -143,14 +143,14 @@ class StatsStore(StateDeltasStore): yield self.runInteraction("populate_stats_cleanup", _delete_staging_area) yield self._end_background_update("populate_stats_cleanup") - defer.returnValue(1) + return 1 @defer.inlineCallbacks def _populate_stats_process_rooms(self, progress, batch_size): if not self.stats_enabled: yield self._end_background_update("populate_stats_process_rooms") - defer.returnValue(1) + return 1 # If we don't have progress filed, delete everything. if not progress: @@ -186,7 +186,7 @@ class StatsStore(StateDeltasStore): # No more rooms -- complete the transaction. if not rooms_to_work_on: yield self._end_background_update("populate_stats_process_rooms") - defer.returnValue(1) + return 1 logger.info( "Processing the next %d rooms of %d remaining", @@ -303,9 +303,9 @@ class StatsStore(StateDeltasStore): if processed_event_count > batch_size: # Don't process any more rooms, we've hit our batch size. 
- defer.returnValue(processed_event_count) + return processed_event_count - defer.returnValue(processed_event_count) + return processed_event_count def delete_all_stats(self): """ diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index a0465484df..856c2ee8d8 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -300,7 +300,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) if not room_ids: - defer.returnValue({}) + return {} results = {} room_ids = list(room_ids) @@ -323,7 +323,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) results.update(dict(zip(rm_ids, res))) - defer.returnValue(results) + return results def get_rooms_that_changed(self, room_ids, from_key): """Given a list of rooms and a token, return rooms where there may have @@ -364,7 +364,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): the chunk of events returned. """ if from_key == to_key: - defer.returnValue(([], from_key)) + return ([], from_key) from_id = RoomStreamToken.parse_stream_token(from_key).stream to_id = RoomStreamToken.parse_stream_token(to_key).stream @@ -374,7 +374,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) if not has_changed: - defer.returnValue(([], from_key)) + return ([], from_key) def f(txn): sql = ( @@ -407,7 +407,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # get. key = from_key - defer.returnValue((ret, key)) + return (ret, key) @defer.inlineCallbacks def get_membership_changes_for_user(self, user_id, from_key, to_key): @@ -415,14 +415,14 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): to_id = RoomStreamToken.parse_stream_token(to_key).stream if from_key == to_key: - defer.returnValue([]) + return [] if from_id: has_changed = self._membership_stream_cache.has_entity_changed( user_id, int(from_id) ) if not has_changed: - defer.returnValue([]) + return [] def f(txn): sql = ( @@ -447,7 +447,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): self._set_before_and_after(ret, rows, topo_order=False) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def get_recent_events_for_room(self, room_id, limit, end_token): @@ -477,7 +477,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): self._set_before_and_after(events, rows) - defer.returnValue((events, token)) + return (events, token) @defer.inlineCallbacks def get_recent_event_ids_for_room(self, room_id, limit, end_token): @@ -496,7 +496,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): """ # Allow a zero limit here, and no-op. if limit == 0: - defer.returnValue(([], end_token)) + return ([], end_token) end_token = RoomStreamToken.parse(end_token) @@ -511,7 +511,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # We want to return the results in ascending order. 
rows.reverse() - defer.returnValue((rows, token)) + return (rows, token) def get_room_event_after_stream_ordering(self, room_id, stream_ordering): """Gets details of the first event in a room at or after a stream ordering @@ -549,12 +549,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): """ token = yield self.get_room_max_stream_ordering() if room_id is None: - defer.returnValue("s%d" % (token,)) + return "s%d" % (token,) else: topo = yield self.runInteraction( "_get_max_topological_txn", self._get_max_topological_txn, room_id ) - defer.returnValue("t%d-%d" % (topo, token)) + return "t%d-%d" % (topo, token) def get_stream_token_for_event(self, event_id): """The stream token for an event @@ -674,14 +674,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): [e for e in results["after"]["event_ids"]], get_prev_content=True ) - defer.returnValue( - { - "events_before": events_before, - "events_after": events_after, - "start": results["before"]["token"], - "end": results["after"]["token"], - } - ) + return { + "events_before": events_before, + "events_after": events_after, + "start": results["before"]["token"], + "end": results["after"]["token"], + } def _get_events_around_txn( self, txn, room_id, event_id, before_limit, after_limit, event_filter @@ -785,7 +783,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): events = yield self.get_events_as_list(event_ids) - defer.returnValue((upper_bound, events)) + return (upper_bound, events) def get_federation_out_pos(self, typ): return self._simple_select_one_onecol( @@ -939,7 +937,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): self._set_before_and_after(events, rows) - defer.returnValue((events, token)) + return (events, token) class StreamStore(StreamWorkerStore): diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index e88f8ea35f..20dd6bd53d 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ -66,7 +66,7 @@ class TagsWorkerStore(AccountDataWorkerStore): room_id string, tag string and content string. """ if last_id == current_id: - defer.returnValue([]) + return [] def get_all_updated_tags_txn(txn): sql = ( @@ -107,7 +107,7 @@ class TagsWorkerStore(AccountDataWorkerStore): ) results.extend(tags) - defer.returnValue(results) + return results @defer.inlineCallbacks def get_updated_tags(self, user_id, stream_id): @@ -135,7 +135,7 @@ class TagsWorkerStore(AccountDataWorkerStore): user_id, int(stream_id) ) if not changed: - defer.returnValue({}) + return {} room_ids = yield self.runInteraction("get_updated_tags", get_updated_tags_txn) @@ -145,7 +145,7 @@ class TagsWorkerStore(AccountDataWorkerStore): for room_id in room_ids: results[room_id] = tags_by_room.get(room_id, {}) - defer.returnValue(results) + return results def get_tags_for_room(self, user_id, room_id): """Get all the tags for the given room @@ -194,7 +194,7 @@ class TagsStore(TagsWorkerStore): self.get_tags_for_user.invalidate((user_id,)) result = self._account_data_id_gen.get_current_token() - defer.returnValue(result) + return result @defer.inlineCallbacks def remove_tag_from_room(self, user_id, room_id, tag): @@ -217,7 +217,7 @@ class TagsStore(TagsWorkerStore): self.get_tags_for_user.invalidate((user_id,)) result = self._account_data_id_gen.get_current_token() - defer.returnValue(result) + return result def _update_revision_txn(self, txn, user_id, room_id, next_id): """Update the latest revision of the tags for the given user and room. 
diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index c585cf6cf7..b3c3bf55bc 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -147,7 +147,7 @@ class TransactionStore(SQLBaseStore): result = self._destination_retry_cache.get(destination, SENTINEL) if result is not SENTINEL: - defer.returnValue(result) + return result result = yield self.runInteraction( "get_destination_retry_timings", @@ -158,7 +158,7 @@ class TransactionStore(SQLBaseStore): # We don't hugely care about race conditions between getting and # invalidating the cache, since we time out fairly quickly anyway. self._destination_retry_cache[destination] = result - defer.returnValue(result) + return result def _get_destination_retry_timings(self, txn, destination): result = self._simple_select_one_txn( diff --git a/synapse/storage/user_directory.py b/synapse/storage/user_directory.py index 7fd16fe65e..b5188d9bee 100644 --- a/synapse/storage/user_directory.py +++ b/synapse/storage/user_directory.py @@ -109,7 +109,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): yield self._simple_insert(TEMP_TABLE + "_position", {"position": new_pos}) yield self._end_background_update("populate_user_directory_createtables") - defer.returnValue(1) + return 1 @defer.inlineCallbacks def _populate_user_directory_cleanup(self, progress, batch_size): @@ -131,7 +131,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): ) yield self._end_background_update("populate_user_directory_cleanup") - defer.returnValue(1) + return 1 @defer.inlineCallbacks def _populate_user_directory_process_rooms(self, progress, batch_size): @@ -177,7 +177,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): # No more rooms -- complete the transaction. if not rooms_to_work_on: yield self._end_background_update("populate_user_directory_process_rooms") - defer.returnValue(1) + return 1 logger.info( "Processing the next %d rooms of %d remaining" @@ -257,9 +257,9 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): if processed_event_count > batch_size: # Don't process any more rooms, we've hit our batch size. - defer.returnValue(processed_event_count) + return processed_event_count - defer.returnValue(processed_event_count) + return processed_event_count @defer.inlineCallbacks def _populate_user_directory_process_users(self, progress, batch_size): @@ -268,7 +268,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): """ if not self.hs.config.user_directory_search_all_users: yield self._end_background_update("populate_user_directory_process_users") - defer.returnValue(1) + return 1 def _get_next_batch(txn): sql = "SELECT user_id FROM %s LIMIT %s" % ( @@ -298,7 +298,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): # No more users -- complete the transaction. 
if not users_to_work_on: yield self._end_background_update("populate_user_directory_process_users") - defer.returnValue(1) + return 1 logger.info( "Processing the next %d users of %d remaining" @@ -322,7 +322,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): progress, ) - defer.returnValue(len(users_to_work_on)) + return len(users_to_work_on) @defer.inlineCallbacks def is_room_world_readable_or_publicly_joinable(self, room_id): @@ -344,16 +344,16 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): join_rule_ev = yield self.get_event(join_rules_id, allow_none=True) if join_rule_ev: if join_rule_ev.content.get("join_rule") == JoinRules.PUBLIC: - defer.returnValue(True) + return True hist_vis_id = current_state_ids.get((EventTypes.RoomHistoryVisibility, "")) if hist_vis_id: hist_vis_ev = yield self.get_event(hist_vis_id, allow_none=True) if hist_vis_ev: if hist_vis_ev.content.get("history_visibility") == "world_readable": - defer.returnValue(True) + return True - defer.returnValue(False) + return False def update_profile_in_user_dir(self, user_id, display_name, avatar_url): """ @@ -499,7 +499,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): user_ids = set(user_ids_share_pub) user_ids.update(user_ids_share_priv) - defer.returnValue(user_ids) + return user_ids def add_users_who_share_private_room(self, room_id, user_id_tuples): """Insert entries into the users_who_share_private_rooms table. The first @@ -609,7 +609,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): users = set(pub_rows) users.update(rows) - defer.returnValue(list(users)) + return list(users) @defer.inlineCallbacks def get_rooms_in_common_for_users(self, user_id, other_user_id): @@ -635,7 +635,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): "get_rooms_in_common_for_users", None, sql, user_id, other_user_id ) - defer.returnValue([room_id for room_id, in rows]) + return [room_id for room_id, in rows] def delete_all_from_user_dir(self): """Delete the entire user directory @@ -782,7 +782,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): limited = len(results) > limit - defer.returnValue({"limited": limited, "results": results}) + return {"limited": limited, "results": results} def _parse_query_sqlite(search_term): diff --git a/synapse/storage/user_erasure_store.py b/synapse/storage/user_erasure_store.py index 1815fdc0dd..05cabc2282 100644 --- a/synapse/storage/user_erasure_store.py +++ b/synapse/storage/user_erasure_store.py @@ -12,9 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import operator -from twisted.internet import defer +import operator from synapse.storage._base import SQLBaseStore from synapse.util.caches.descriptors import cached, cachedList @@ -67,7 +66,7 @@ class UserErasureWorkerStore(SQLBaseStore): erased_users = yield self.runInteraction("are_users_erased", _get_erased_users) res = dict((u, u in erased_users) for u in user_ids) - defer.returnValue(res) + return res class UserErasureStore(UserErasureWorkerStore): diff --git a/synapse/streams/events.py b/synapse/streams/events.py index 488c49747a..b91fb2db7b 100644 --- a/synapse/streams/events.py +++ b/synapse/streams/events.py @@ -56,7 +56,7 @@ class EventSources(object): device_list_key=device_list_key, groups_key=groups_key, ) - defer.returnValue(token) + return token @defer.inlineCallbacks def get_current_token_for_pagination(self): @@ -80,4 +80,4 @@ class EventSources(object): device_list_key=0, groups_key=0, ) - defer.returnValue(token) + return token diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index f506b2a695..841625a991 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -49,7 +49,7 @@ class Clock(object): with context.PreserveLoggingContext(): self._reactor.callLater(seconds, d.callback, seconds) res = yield d - defer.returnValue(res) + return res def time(self): """Returns the current system time in seconds since epoch.""" diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 58a6b8764f..f1c46836b1 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -366,7 +366,7 @@ class ReadWriteLock(object): new_defer.callback(None) self.key_to_current_readers.get(key, set()).discard(new_defer) - defer.returnValue(_ctx_manager()) + return _ctx_manager() @defer.inlineCallbacks def write(self, key): @@ -396,7 +396,7 @@ class ReadWriteLock(object): if self.key_to_current_writer[key] == new_defer: self.key_to_current_writer.pop(key) - defer.returnValue(_ctx_manager()) + return _ctx_manager() def _cancelled_to_timed_out_error(value, timeout): diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 675db2f448..a1acacbde9 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -289,7 +289,7 @@ class CacheDescriptor(_CacheDescriptorBase): def foo(self, key, cache_context): r1 = yield self.bar1(key, on_invalidate=cache_context.invalidate) r2 = yield self.bar2(key, on_invalidate=cache_context.invalidate) - defer.returnValue(r1 + r2) + return r1 + r2 Args: num_args (int): number of positional arguments (excluding ``self`` and diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index d6908e169d..82d3eefe0e 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -121,7 +121,7 @@ class ResponseCache(object): @defer.inlineCallbacks def handle_request(request): # etc - defer.returnValue(result) + return result result = yield response_cache.wrap( key, diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index c30b6de19c..0910930c21 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -67,7 +67,7 @@ def measure_func(name): def measured_func(self, *args, **kwargs): with Measure(self.clock, name): r = yield func(self, *args, **kwargs) - defer.returnValue(r) + return r return measured_func diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index d8d0ceae51..0862b5ca5a 100644 --- a/synapse/util/retryutils.py +++ 
b/synapse/util/retryutils.py @@ -95,15 +95,13 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs) # maximum backoff even though it might only have been down briefly backoff_on_failure = not ignore_backoff - defer.returnValue( - RetryDestinationLimiter( - destination, - clock, - store, - retry_interval, - backoff_on_failure=backoff_on_failure, - **kwargs - ) + return RetryDestinationLimiter( + destination, + clock, + store, + retry_interval, + backoff_on_failure=backoff_on_failure, + **kwargs ) diff --git a/synapse/visibility.py b/synapse/visibility.py index 2a11c83596..bf0f1eebd8 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -208,7 +208,7 @@ def filter_events_for_client( filtered_events = filter(operator.truth, filtered_events) # we turn it into a list before returning it. - defer.returnValue(list(filtered_events)) + return list(filtered_events) @defer.inlineCallbacks @@ -317,11 +317,11 @@ def filter_events_for_server( elif redact: to_return.append(prune_event(e)) - defer.returnValue(to_return) + return to_return # If there are no erased users then we can just return the given list # of events without having to copy it. - defer.returnValue(events) + return events # Ok, so we're dealing with events that have non-trivial visibility # rules, so we need to also get the memberships of the room. @@ -384,4 +384,4 @@ def filter_events_for_server( elif redact: to_return.append(prune_event(e)) - defer.returnValue(to_return) + return to_return diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 8d94a503d6..c4f0bbd3dd 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -107,7 +107,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): self.assertEquals(LoggingContext.current_context().request, "11") with PreserveLoggingContext(): yield persp_deferred - defer.returnValue(persp_resp) + return persp_resp self.http_client.post_json.side_effect = get_perspectives @@ -554,7 +554,7 @@ def run_in_context(f, *args, **kwargs): # logs. 
ctx.request = "testctx" rv = yield f(*args, **kwargs) - defer.returnValue(rv) + return rv def _verify_json_for_server(kr, *args): @@ -565,6 +565,6 @@ def _verify_json_for_server(kr, *args): @defer.inlineCallbacks def v(): rv1 = yield kr.verify_json_for_server(*args) - defer.returnValue(rv1) + return rv1 return run_in_context(v) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 90d0129374..99dce45cfe 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -283,4 +283,4 @@ class RegistrationTestCase(unittest.HomeserverTestCase): user, requester, displayname, by_admin=True ) - defer.returnValue((user_id, token)) + return (user_id, token) diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index a49f9b3224..b906686b49 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -145,7 +145,7 @@ class MatrixFederationAgentTests(TestCase): try: fetch_res = yield fetch_d - defer.returnValue(fetch_res) + return fetch_res except Exception as e: logger.info("Fetch of %s failed: %s", uri.decode("ascii"), e) raise @@ -936,7 +936,7 @@ class MatrixFederationAgentTests(TestCase): except Exception as e: logger.warning("Error fetching well-known: %s", e) raise - defer.returnValue(result) + return result def test_well_known_cache(self): self.reactor.lookups["testserv"] = "1.2.3.4" diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py index 65b51dc981..3b885ef64b 100644 --- a/tests/http/federation/test_srv_resolver.py +++ b/tests/http/federation/test_srv_resolver.py @@ -61,7 +61,7 @@ class SrvResolverTestCase(unittest.TestCase): # should have restored our context self.assertIs(LoggingContext.current_context(), ctx) - defer.returnValue(result) + return result test_d = do_lookup() self.assertNoResult(test_d) diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py index b9d6d7ad1c..2b01f40a42 100644 --- a/tests/http/test_fedclient.py +++ b/tests/http/test_fedclient.py @@ -68,7 +68,7 @@ class FederationClientTests(HomeserverTestCase): try: fetch_res = yield fetch_d - defer.returnValue(fetch_res) + return fetch_res finally: check_logcontext(context) diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index a8adc9a61d..a3d7e3c046 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -46,7 +46,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase): @defer.inlineCallbacks def cb(): yield Clock(reactor).sleep(0) - defer.returnValue("yay") + return "yay" @defer.inlineCallbacks def test(): diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index fbb9302694..9fabe3fbc0 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -43,7 +43,7 @@ class BackgroundUpdateTestCase(unittest.TestCase): "test_update", progress, ) - defer.returnValue(count) + return count self.update_handler.side_effect = update @@ -60,7 +60,7 @@ class BackgroundUpdateTestCase(unittest.TestCase): @defer.inlineCallbacks def update(progress, count): yield self.store._end_background_update("test_update") - defer.returnValue(count) + return count self.update_handler.side_effect = update self.update_handler.reset_mock() diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index 
732a778fab..1cb471205b 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -69,7 +69,7 @@ class RedactionTestCase(unittest.TestCase): yield self.store.persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def inject_message(self, room, user, body): @@ -92,7 +92,7 @@ class RedactionTestCase(unittest.TestCase): yield self.store.persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def inject_redaction(self, room, event_id, user, reason): diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 73ed943f5a..c6e8196b91 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -67,7 +67,7 @@ class RoomMemberStoreTestCase(unittest.TestCase): yield self.store.persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def test_one_member(self): diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index 212a7ae765..5c2cf3c2db 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -65,7 +65,7 @@ class StateStoreTestCase(tests.unittest.TestCase): yield self.store.persist_event(event, context) - defer.returnValue(event) + return event def assertStateMapEqual(self, s1, s2): for t in s1: diff --git a/tests/test_visibility.py b/tests/test_visibility.py index 118c3bd238..e0605dac2f 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -139,7 +139,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase): builder ) yield self.hs.get_datastore().persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def inject_room_member(self, user_id, membership="join", extra_content={}): @@ -161,7 +161,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase): ) yield self.hs.get_datastore().persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def inject_message(self, user_id, content=None): @@ -182,7 +182,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase): ) yield self.hs.get_datastore().persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def test_large_room(self): diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 7807328e2f..56320bbaf9 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -159,7 +159,7 @@ class DescriptorTestCase(unittest.TestCase): def inner_fn(): with PreserveLoggingContext(): yield complete_lookup - defer.returnValue(1) + return 1 return inner_fn() @@ -169,7 +169,7 @@ class DescriptorTestCase(unittest.TestCase): c1.name = "c1" r = yield obj.fn(1) self.assertEqual(LoggingContext.current_context(), c1) - defer.returnValue(r) + return r def check_result(r): self.assertEqual(r, 1) @@ -286,7 +286,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): # we want this to behave like an asynchronous function yield run_on_reactor() assert LoggingContext.current_context().request == "c1" - defer.returnValue(self.mock(args1, arg2)) + return self.mock(args1, arg2) with LoggingContext() as c1: c1.request = "c1" @@ -334,7 +334,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): def list_fn(self, args1, arg2): # we want this to behave like an asynchronous function yield run_on_reactor() - defer.returnValue(self.mock(args1, arg2)) + return self.mock(args1, arg2) obj = Cls() invalidate0 = 
mock.Mock() diff --git a/tests/utils.py b/tests/utils.py index 8a94ce0b47..425e3387db 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -361,7 +361,7 @@ def setup_test_homeserver( if fed: register_federation_servlets(hs, fed) - defer.returnValue(hs) + return hs def register_federation_servlets(hs, resource): @@ -465,9 +465,9 @@ class MockHttpResource(HttpServer): args = [urlparse.unquote(u) for u in matcher.groups()] (code, response) = yield func(mock_request, *args) - defer.returnValue((code, response)) + return (code, response) except CodeMessageException as e: - defer.returnValue((e.code, cs_error(e.msg, code=e.errcode))) + return (e.code, cs_error(e.msg, code=e.errcode)) raise KeyError("No event can handle %s" % path) -- cgit 1.5.1 From 864f14454322c6cba11476667ade8fc6cbea6f44 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 2 Oct 2019 05:29:01 -0700 Subject: Fix up some typechecking (#6150) * type checking fixes * changelog --- .gitignore | 1 + changelog.d/6150.misc | 1 + synapse/api/errors.py | 3 ++- synapse/api/room_versions.py | 5 ++++- synapse/app/_base.py | 4 +++- synapse/config/appservice.py | 5 +++-- synapse/config/consent_config.py | 4 ++-- synapse/config/password_auth_providers.py | 4 +++- synapse/config/repository.py | 5 +++-- synapse/config/server.py | 10 +++++++--- synapse/config/server_notices_config.py | 4 ++-- synapse/logging/opentracing.py | 9 +++++---- synapse/logging/utils.py | 20 ++++++++++++++++---- synapse/metrics/__init__.py | 4 ++-- synapse/metrics/_exposition.py | 4 ++-- synapse/python_dependencies.py | 17 +++++++++++++---- synapse/types.py | 3 ++- synapse/util/async_helpers.py | 10 +++++++--- synapse/util/caches/__init__.py | 3 ++- synapse/util/caches/descriptors.py | 22 ++++++++++++++++++++-- synapse/util/caches/treecache.py | 4 +++- synapse/util/module_loader.py | 2 +- 22 files changed, 104 insertions(+), 40 deletions(-) create mode 100644 changelog.d/6150.misc (limited to 'synapse/util/async_helpers.py') diff --git a/.gitignore b/.gitignore index e53d4908d5..747b8714d7 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ *.tac _trial_temp/ _trial_temp*/ +/out # stuff that is likely to exist when you run a server locally /*.db diff --git a/changelog.d/6150.misc b/changelog.d/6150.misc new file mode 100644 index 0000000000..a373c091ab --- /dev/null +++ b/changelog.d/6150.misc @@ -0,0 +1 @@ +Expand type-checking on modules imported by synapse.config. diff --git a/synapse/api/errors.py b/synapse/api/errors.py index cf1ebf1af2..1bb2e86789 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -17,6 +17,7 @@ """Contains exceptions and error codes.""" import logging +from typing import Dict from six import iteritems from six.moves import http_client @@ -111,7 +112,7 @@ class ProxiedRequestError(SynapseError): def __init__(self, code, msg, errcode=Codes.UNKNOWN, additional_fields=None): super(ProxiedRequestError, self).__init__(code, msg, errcode) if additional_fields is None: - self._additional_fields = {} + self._additional_fields = {} # type: Dict else: self._additional_fields = dict(additional_fields) diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 95292b7dec..c6f50fd7b9 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -12,6 +12,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
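The defer.returnValue(rv) -> return rv substitutions that run through these test diffs are possible because, on Python 3, a generator may return a value directly and @defer.inlineCallbacks picks it up from the resulting StopIteration; defer.returnValue() was only required for Python 2 compatibility. A minimal sketch of the two equivalent spellings (the function names here are illustrative, not from the patch):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def old_style():
        rv = yield defer.succeed(42)
        defer.returnValue(rv)  # Python 2-compatible spelling

    @defer.inlineCallbacks
    def new_style():
        rv = yield defer.succeed(42)
        return rv  # Python 3 only: a generator may return its result directly

Both decorated functions return a Deferred that fires with 42.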
+ +from typing import Dict + import attr @@ -102,4 +105,4 @@ KNOWN_ROOM_VERSIONS = { RoomVersions.V4, RoomVersions.V5, ) -} # type: dict[str, RoomVersion] +} # type: Dict[str, RoomVersion] diff --git a/synapse/app/_base.py b/synapse/app/_base.py index c30fdeee9a..2ac7d5c064 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -263,7 +263,9 @@ def start(hs, listeners=None): refresh_certificate(hs) # Start the tracer - synapse.logging.opentracing.init_tracer(hs.config) + synapse.logging.opentracing.init_tracer( # type: ignore[attr-defined] # noqa + hs.config + ) # It is now safe to start your Synapse. hs.start_listening(listeners) diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py index 8387ff6805..28d36b1bc3 100644 --- a/synapse/config/appservice.py +++ b/synapse/config/appservice.py @@ -13,6 +13,7 @@ # limitations under the License. import logging +from typing import Dict from six import string_types from six.moves.urllib import parse as urlparse @@ -56,8 +57,8 @@ def load_appservices(hostname, config_files): return [] # Dicts of value -> filename - seen_as_tokens = {} - seen_ids = {} + seen_as_tokens = {} # type: Dict[str, str] + seen_ids = {} # type: Dict[str, str] appservices = [] diff --git a/synapse/config/consent_config.py b/synapse/config/consent_config.py index 94916f3a49..48976e17b1 100644 --- a/synapse/config/consent_config.py +++ b/synapse/config/consent_config.py @@ -73,8 +73,8 @@ DEFAULT_CONFIG = """\ class ConsentConfig(Config): - def __init__(self): - super(ConsentConfig, self).__init__() + def __init__(self, *args): + super(ConsentConfig, self).__init__(*args) self.user_consent_version = None self.user_consent_template_dir = None diff --git a/synapse/config/password_auth_providers.py b/synapse/config/password_auth_providers.py index 788c39c9fb..c50e244394 100644 --- a/synapse/config/password_auth_providers.py +++ b/synapse/config/password_auth_providers.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Any, List + from synapse.util.module_loader import load_module from ._base import Config @@ -22,7 +24,7 @@ LDAP_PROVIDER = "ldap_auth_provider.LdapAuthProvider" class PasswordAuthProviderConfig(Config): def read_config(self, config, **kwargs): - self.password_providers = [] + self.password_providers = [] # type: List[Any] providers = [] # We want to be backwards compatible with the old `ldap_config` diff --git a/synapse/config/repository.py b/synapse/config/repository.py index 52e014608a..14740891f3 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -15,6 +15,7 @@ import os from collections import namedtuple +from typing import Dict, List from synapse.python_dependencies import DependencyException, check_requirements from synapse.util.module_loader import load_module @@ -61,7 +62,7 @@ def parse_thumbnail_requirements(thumbnail_sizes): Dictionary mapping from media type string to list of ThumbnailRequirement tuples. """ - requirements = {} + requirements = {} # type: Dict[str, List] for size in thumbnail_sizes: width = size["width"] height = size["height"] @@ -130,7 +131,7 @@ class ContentRepositoryConfig(Config): # # We don't create the storage providers here as not all workers need # them to be started. 
- self.media_storage_providers = [] + self.media_storage_providers = [] # type: List[tuple] for provider_config in storage_providers: # We special case the module "file_system" so as not to need to diff --git a/synapse/config/server.py b/synapse/config/server.py index 536ee7f29c..709bd387e5 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -19,6 +19,7 @@ import logging import os.path import re from textwrap import indent +from typing import List import attr import yaml @@ -243,7 +244,7 @@ class ServerConfig(Config): # events with profile information that differ from the target's global profile. self.allow_per_room_profiles = config.get("allow_per_room_profiles", True) - self.listeners = [] + self.listeners = [] # type: List[dict] for listener in config.get("listeners", []): if not isinstance(listener.get("port", None), int): raise ConfigError( @@ -287,7 +288,10 @@ class ServerConfig(Config): validator=attr.validators.instance_of(bool), default=False ) complexity = attr.ib( - validator=attr.validators.instance_of((int, float)), default=1.0 + validator=attr.validators.instance_of( + (float, int) # type: ignore[arg-type] # noqa + ), + default=1.0, ) complexity_error = attr.ib( validator=attr.validators.instance_of(str), @@ -366,7 +370,7 @@ class ServerConfig(Config): "cleanup_extremities_with_dummy_events", True ) - def has_tls_listener(self): + def has_tls_listener(self) -> bool: return any(l["tls"] for l in self.listeners) def generate_config_section( diff --git a/synapse/config/server_notices_config.py b/synapse/config/server_notices_config.py index eaac3d73bc..6d4285ef93 100644 --- a/synapse/config/server_notices_config.py +++ b/synapse/config/server_notices_config.py @@ -59,8 +59,8 @@ class ServerNoticesConfig(Config): None if server notices are not enabled. """ - def __init__(self): - super(ServerNoticesConfig, self).__init__() + def __init__(self, *args): + super(ServerNoticesConfig, self).__init__(*args) self.server_notices_mxid = None self.server_notices_mxid_display_name = None self.server_notices_mxid_avatar_url = None diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 308a27213b..cd1ff6a518 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -170,6 +170,7 @@ import inspect import logging import re from functools import wraps +from typing import Dict from canonicaljson import json @@ -547,7 +548,7 @@ def inject_active_span_twisted_headers(headers, destination, check_destination=T return span = opentracing.tracer.active_span - carrier = {} + carrier = {} # type: Dict[str, str] opentracing.tracer.inject(span, opentracing.Format.HTTP_HEADERS, carrier) for key, value in carrier.items(): @@ -584,7 +585,7 @@ def inject_active_span_byte_dict(headers, destination, check_destination=True): span = opentracing.tracer.active_span - carrier = {} + carrier = {} # type: Dict[str, str] opentracing.tracer.inject(span, opentracing.Format.HTTP_HEADERS, carrier) for key, value in carrier.items(): @@ -639,7 +640,7 @@ def get_active_span_text_map(destination=None): if destination and not whitelisted_homeserver(destination): return {} - carrier = {} + carrier = {} # type: Dict[str, str] opentracing.tracer.inject( opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier ) @@ -653,7 +654,7 @@ def active_span_context_as_string(): Returns: The active span context encoded as a string. 
""" - carrier = {} + carrier = {} # type: Dict[str, str] if opentracing: opentracing.tracer.inject( opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier diff --git a/synapse/logging/utils.py b/synapse/logging/utils.py index 7df0fa6087..6073fc2725 100644 --- a/synapse/logging/utils.py +++ b/synapse/logging/utils.py @@ -119,7 +119,11 @@ def trace_function(f): logger = logging.getLogger(name) level = logging.DEBUG - s = inspect.currentframe().f_back + frame = inspect.currentframe() + if frame is None: + raise Exception("Can't get current frame!") + + s = frame.f_back to_print = [ "\t%s:%s %s. Args: args=%s, kwargs=%s" @@ -144,7 +148,7 @@ def trace_function(f): pathname=pathname, lineno=lineno, msg=msg, - args=None, + args=tuple(), exc_info=None, ) @@ -157,7 +161,12 @@ def trace_function(f): def get_previous_frames(): - s = inspect.currentframe().f_back.f_back + + frame = inspect.currentframe() + if frame is None: + raise Exception("Can't get current frame!") + + s = frame.f_back.f_back to_return = [] while s: if s.f_globals["__name__"].startswith("synapse"): @@ -174,7 +183,10 @@ def get_previous_frames(): def get_previous_frame(ignore=[]): - s = inspect.currentframe().f_back.f_back + frame = inspect.currentframe() + if frame is None: + raise Exception("Can't get current frame!") + s = frame.f_back.f_back while s: if s.f_globals["__name__"].startswith("synapse"): diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index bec3b13397..0b45e1f52a 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -125,7 +125,7 @@ class InFlightGauge(object): ) # Counts number of in flight blocks for a given set of label values - self._registrations = {} + self._registrations = {} # type: Dict # Protects access to _registrations self._lock = threading.Lock() @@ -226,7 +226,7 @@ class BucketCollector(object): # Fetch the data -- this must be synchronous! data = self.data_collector() - buckets = {} + buckets = {} # type: Dict[float, int] res = [] for x in data.keys(): diff --git a/synapse/metrics/_exposition.py b/synapse/metrics/_exposition.py index 74d9c3ecd3..a248103191 100644 --- a/synapse/metrics/_exposition.py +++ b/synapse/metrics/_exposition.py @@ -36,9 +36,9 @@ from twisted.web.resource import Resource try: from prometheus_client.samples import Sample except ImportError: - Sample = namedtuple( + Sample = namedtuple( # type: ignore[no-redef] # noqa "Sample", ["name", "labels", "value", "timestamp", "exemplar"] - ) # type: ignore + ) CONTENT_TYPE_LATEST = str("text/plain; version=0.0.4; charset=utf-8") diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 0bd563edc7..aa7da1c543 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -15,7 +15,7 @@ # limitations under the License. 
import logging -from typing import Set +from typing import List, Set from pkg_resources import ( DistributionNotFound, @@ -73,6 +73,7 @@ REQUIREMENTS = [ "netaddr>=0.7.18", "Jinja2>=2.9", "bleach>=1.4.3", + "typing-extensions>=3.7.4", ] CONDITIONAL_REQUIREMENTS = { @@ -144,7 +145,11 @@ def check_requirements(for_feature=None): deps_needed.append(dependency) errors.append( "Needed %s, got %s==%s" - % (dependency, e.dist.project_name, e.dist.version) + % ( + dependency, + e.dist.project_name, # type: ignore[attr-defined] # noqa + e.dist.version, # type: ignore[attr-defined] # noqa + ) ) except DistributionNotFound: deps_needed.append(dependency) @@ -159,7 +164,7 @@ def check_requirements(for_feature=None): if not for_feature: # Check the optional dependencies are up to date. We allow them to not be # installed. - OPTS = sum(CONDITIONAL_REQUIREMENTS.values(), []) + OPTS = sum(CONDITIONAL_REQUIREMENTS.values(), []) # type: List[str] for dependency in OPTS: try: @@ -168,7 +173,11 @@ def check_requirements(for_feature=None): deps_needed.append(dependency) errors.append( "Needed optional %s, got %s==%s" - % (dependency, e.dist.project_name, e.dist.version) + % ( + dependency, + e.dist.project_name, # type: ignore[attr-defined] # noqa + e.dist.version, # type: ignore[attr-defined] # noqa + ) ) except DistributionNotFound: # If it's not found, we don't care diff --git a/synapse/types.py b/synapse/types.py index 51eadb6ad4..8f79797f17 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -318,6 +318,7 @@ class StreamToken( ) ): _SEPARATOR = "_" + START = None # type: StreamToken @classmethod def from_string(cls, string): @@ -402,7 +403,7 @@ class RoomStreamToken(namedtuple("_StreamToken", "topological stream")): followed by the "stream_ordering" id of the event it comes after. """ - __slots__ = [] + __slots__ = [] # type: list @classmethod def parse(cls, string): diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index f1c46836b1..0d3bdd88ce 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -13,9 +13,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + import collections import logging from contextlib import contextmanager +from typing import Dict, Sequence, Set, Union from six.moves import range @@ -213,7 +215,9 @@ class Linearizer(object): # the first element is the number of things executing, and # the second element is an OrderedDict, where the keys are deferreds for the # things blocked from executing. - self.key_to_defer = {} + self.key_to_defer = ( + {} + ) # type: Dict[str, Sequence[Union[int, Dict[defer.Deferred, int]]]] def queue(self, key): # we avoid doing defer.inlineCallbacks here, so that cancellation works correctly. 
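Note that the annotations introduced throughout this commit are PEP 484 type comments rather than inline annotations, so mypy can check them while they stay inert at runtime. A minimal sketch of the two equivalent forms, using variables modelled on the ones annotated above:

    from typing import Dict, List

    # Comment form, as used throughout this commit:
    listeners = []  # type: List[dict]

    # Equivalent inline form, valid on Python 3.6+:
    seen_ids: Dict[str, str] = {}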
@@ -340,10 +344,10 @@ class ReadWriteLock(object): def __init__(self): # Latest readers queued - self.key_to_current_readers = {} + self.key_to_current_readers = {} # type: Dict[str, Set[defer.Deferred]] # Latest writer queued - self.key_to_current_writer = {} + self.key_to_current_writer = {} # type: Dict[str, defer.Deferred] @defer.inlineCallbacks def read(self, key): diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index b50e3503f0..43fd65d693 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -16,6 +16,7 @@ import logging import os +from typing import Dict import six from six.moves import intern @@ -37,7 +38,7 @@ def get_cache_factor_for(cache_name): caches_by_name = {} -collectors_by_name = {} +collectors_by_name = {} # type: Dict cache_size = Gauge("synapse_util_caches_cache:size", "", ["name"]) cache_hits = Gauge("synapse_util_caches_cache:hits", "", ["name"]) diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 43f66ec4be..5ac2530a6a 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -18,10 +18,12 @@ import inspect import logging import threading from collections import namedtuple +from typing import Any, cast from six import itervalues from prometheus_client import Gauge +from typing_extensions import Protocol from twisted.internet import defer @@ -37,6 +39,18 @@ from . import register_cache logger = logging.getLogger(__name__) +class _CachedFunction(Protocol): + invalidate = None # type: Any + invalidate_all = None # type: Any + invalidate_many = None # type: Any + prefill = None # type: Any + cache = None # type: Any + num_args = None # type: Any + + def __name__(self): + ... + + cache_pending_metric = Gauge( "synapse_util_caches_cache_pending", "Number of lookups currently pending for this cache", @@ -245,7 +259,9 @@ class Cache(object): class _CacheDescriptorBase(object): - def __init__(self, orig, num_args, inlineCallbacks, cache_context=False): + def __init__( + self, orig: _CachedFunction, num_args, inlineCallbacks, cache_context=False + ): self.orig = orig if inlineCallbacks: @@ -404,7 +420,7 @@ class CacheDescriptor(_CacheDescriptorBase): return tuple(get_cache_key_gen(args, kwargs)) @functools.wraps(self.orig) - def wrapped(*args, **kwargs): + def _wrapped(*args, **kwargs): # If we're passed a cache_context then we'll want to call its invalidate() # whenever we are invalidated invalidate_callback = kwargs.pop("on_invalidate", None) @@ -440,6 +456,8 @@ class CacheDescriptor(_CacheDescriptorBase): return make_deferred_yieldable(observer) + wrapped = cast(_CachedFunction, _wrapped) + if self.num_args == 1: wrapped.invalidate = lambda key: cache.invalidate(key[0]) wrapped.prefill = lambda key, val: cache.prefill(key[0], val) diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py index 9a72218d85..2ea4e4e911 100644 --- a/synapse/util/caches/treecache.py +++ b/synapse/util/caches/treecache.py @@ -1,3 +1,5 @@ +from typing import Dict + from six import itervalues SENTINEL = object() @@ -12,7 +14,7 @@ class TreeCache(object): def __init__(self): self.size = 0 - self.root = {} + self.root = {} # type: Dict def __setitem__(self, key, value): return self.set(key, value) diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py index 7ff7eb1e4d..2705cbe5f8 100644 --- a/synapse/util/module_loader.py +++ b/synapse/util/module_loader.py @@ -54,5 +54,5 @@ def load_python_module(location: str): if spec is 
None: raise Exception("Unable to load module at %s" % (location,)) mod = importlib.util.module_from_spec(spec) - spec.loader.exec_module(mod) + spec.loader.exec_module(mod) # type: ignore return mod -- cgit 1.5.1 From 3c2d6c708cd93df7fc945e10014049e9f9b36f46 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 11 Oct 2019 15:26:09 +0100 Subject: Add maybe_awaitable and fix __init__ bugs --- synapse/rest/admin/__init__.py | 7 +++++-- synapse/util/async_helpers.py | 29 +++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 2 deletions(-) (limited to 'synapse/util/async_helpers.py') diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index f7b9483008..939418ee2b 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -44,6 +44,7 @@ from synapse.rest.admin.purge_room_servlet import PurgeRoomServlet from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet from synapse.rest.admin.users import UserAdminServlet from synapse.types import UserID, create_requester +from synapse.util.async_helpers import maybe_awaitable from synapse.util.versionstring import get_version_string logger = logging.getLogger(__name__) @@ -310,7 +311,7 @@ class PurgeHistoryRestServlet(RestServlet): errcode=Codes.BAD_JSON, ) - purge_id = await self.pagination_handler.start_purge_history( + purge_id = self.pagination_handler.start_purge_history( room_id, token, delete_local_events=delete_local_events ) @@ -480,7 +481,9 @@ class ShutdownRoomRestServlet(RestServlet): ratelimit=False, ) - aliases_for_room = await self.store.get_aliases_for_room(room_id) + aliases_for_room = await maybe_awaitable( + self.store.get_aliases_for_room(room_id) + ) await self.store.update_aliases_for_room( room_id, new_room_id, requester_user_id diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 0d3bdd88ce..804dbca443 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -21,6 +21,8 @@ from typing import Dict, Sequence, Set, Union from six.moves import range +import attr + from twisted.internet import defer from twisted.internet.defer import CancelledError from twisted.python import failure @@ -483,3 +485,30 @@ def timeout_deferred(deferred, timeout, reactor, on_timeout_cancel=None): deferred.addCallbacks(success_cb, failure_cb) return new_d + + +@attr.s(slots=True, frozen=True) +class DoneAwaitable(object): + """Simple awaitable that returns the provided value. + """ + + value = attr.ib() + + def __await__(self): + return self + + def __iter__(self): + return self + + def __next__(self): + raise StopIteration(self.value) + + +def maybe_awaitable(value): + """Convert a value to an awaitable if not already an awaitable. + """ + + if hasattr(value, "__await__"): + return value + + return DoneAwaitable(value) -- cgit 1.5.1 From 09a135b0391a7a65c24c8c7b046cb6573dee3947 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 29 Oct 2019 15:02:23 +0000 Subject: Make concurrently_execute work with async/await --- synapse/util/async_helpers.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'synapse/util/async_helpers.py') diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 804dbca443..7659eaeb42 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -138,7 +138,7 @@ def concurrently_execute(func, args, limit): the number of concurrent executions. Args: - func (func): Function to execute, should return a deferred. 
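maybe_awaitable(), introduced above, is what lets async code call helpers that might return a plain value, a Deferred, or a coroutine: anything exposing __await__ (Deferreds and coroutines both do) is passed through untouched, and any other value is wrapped in DoneAwaitable so that await still works. A minimal usage sketch, modelled on the ShutdownRoomRestServlet hunk above (the store method is from that hunk; the wrapper function is illustrative):

    from synapse.util.async_helpers import maybe_awaitable

    async def get_room_aliases(store, room_id):
        # Safe whether get_aliases_for_room returns a Deferred,
        # a coroutine, or a plain (e.g. cached) value.
        return await maybe_awaitable(store.get_aliases_for_room(room_id))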
+ func (func): Function to execute, should return a deferred or coroutine. args (list): List of arguments to pass to func, each invocation of func gets a single argument. limit (int): Maximum number of concurrent executions. @@ -148,11 +148,10 @@ """ it = iter(args) - @defer.inlineCallbacks - def _concurrently_execute_inner(): + async def _concurrently_execute_inner(): try: while True: - yield func(next(it)) + await maybe_awaitable(func(next(it))) except StopIteration: pass -- cgit 1.5.1 From 326b3dace77aeb36e516ea9b04ba1baa171bcb47 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 30 Oct 2019 11:35:46 +0000 Subject: Make ObservableDeferred.observe() always return deferred. This makes it easier to use in an async/await world. Also fixes a bug where cache descriptors would occasionally return a raw value rather than a deferred. --- synapse/util/async_helpers.py | 7 ++----- tests/storage/test__base.py | 2 +- tests/util/caches/test_descriptors.py | 4 ++-- 3 files changed, 5 insertions(+), 8 deletions(-) (limited to 'synapse/util/async_helpers.py') diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 7659eaeb42..fd75ba27ad 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -86,11 +86,8 @@ class ObservableDeferred(object): deferred.addCallbacks(callback, errback) - def observe(self): + def observe(self) -> defer.Deferred: """Observe the underlying deferred. - - Can return either a deferred if the underlying deferred is still pending (or has failed), or the actual value. Callers may need to use maybeDeferred. """ if not self._result: d = defer.Deferred() @@ -105,7 +102,7 @@ return d else: success, res = self._result - return res if success else defer.fail(res) + return defer.succeed(res) if success else defer.fail(res) def observers(self): return self._observers diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index dd49a14524..9b81b536f5 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -197,7 +197,7 @@ class CacheDecoratorTestCase(unittest.TestCase): a.func.prefill(("foo",), ObservableDeferred(d)) - self.assertEquals(a.func("foo"), d.result) + self.assertEquals(a.func("foo").result, d.result) self.assertEquals(callcount[0], 0) @defer.inlineCallbacks diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index f907903511..39e360fe24 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -310,14 +310,14 @@ class DescriptorTestCase(unittest.TestCase): obj.mock.return_value = ["spam", "eggs"] r = obj.fn(1, 2) - self.assertEqual(r, ["spam", "eggs"]) + self.assertEqual(r.result, ["spam", "eggs"]) obj.mock.assert_called_once_with(1, 2) obj.mock.reset_mock() # a call with different params should call the mock again obj.mock.return_value = ["chips"] r = obj.fn(1, 3) - self.assertEqual(r, ["chips"]) + self.assertEqual(r.result, ["chips"]) obj.mock.assert_called_once_with(1, 3) obj.mock.reset_mock() -- cgit 1.5.1 From 6e677403b7c71511374b1532b56e0c411840f87b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 30 Oct 2019 11:52:04 +0000 Subject: Clarify docstring --- synapse/util/async_helpers.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'synapse/util/async_helpers.py') diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index fd75ba27ad..b60a604474 100644 --- a/synapse/util/async_helpers.py +++
b/synapse/util/async_helpers.py @@ -88,6 +88,10 @@ class ObservableDeferred(object): def observe(self) -> defer.Deferred: """Observe the underlying deferred. + + This returns a brand new deferred that is resolved when the underlying + deferred is resolved. Interacting with the returned deferred does not + effect the underdlying deferred. """ if not self._result: d = defer.Deferred() -- cgit 1.5.1 From 54fef094b31e0401d6d35efdf7d5d6b0b9e5d51f Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 31 Oct 2019 10:23:24 +0000 Subject: Remove usage of deprecated logger.warn method from codebase (#6271) Replace every instance of `logger.warn` with `logger.warning` as the former is deprecated. --- changelog.d/6271.misc | 1 + scripts/move_remote_media_to_new_store.py | 2 +- scripts/synapse_port_db | 6 ++-- synapse/api/auth.py | 2 +- synapse/app/__init__.py | 4 ++- synapse/app/appservice.py | 4 +-- synapse/app/client_reader.py | 4 +-- synapse/app/event_creator.py | 4 +-- synapse/app/federation_reader.py | 4 +-- synapse/app/federation_sender.py | 4 +-- synapse/app/frontend_proxy.py | 4 +-- synapse/app/homeserver.py | 6 ++-- synapse/app/media_repository.py | 4 +-- synapse/app/pusher.py | 4 +-- synapse/app/synchrotron.py | 4 +-- synapse/app/user_dir.py | 4 +-- synapse/config/key.py | 4 +-- synapse/config/logger.py | 2 +- synapse/event_auth.py | 2 +- synapse/federation/federation_base.py | 6 ++-- synapse/federation/federation_client.py | 8 +++-- synapse/federation/federation_server.py | 20 ++++++------ synapse/federation/sender/transaction_manager.py | 4 +-- synapse/federation/transport/server.py | 8 +++-- synapse/groups/attestations.py | 2 +- synapse/groups/groups_server.py | 2 +- synapse/handlers/auth.py | 6 ++-- synapse/handlers/device.py | 4 +-- synapse/handlers/devicemessage.py | 2 +- synapse/handlers/federation.py | 36 ++++++++++++---------- synapse/handlers/groups_local.py | 2 +- synapse/handlers/identity.py | 6 ++-- synapse/handlers/message.py | 2 +- synapse/handlers/profile.py | 2 +- synapse/handlers/room.py | 2 +- synapse/http/client.py | 4 +-- synapse/http/federation/srv_resolver.py | 2 +- synapse/http/matrixfederationclient.py | 10 +++--- synapse/http/request_metrics.py | 2 +- synapse/http/server.py | 2 +- synapse/http/servlet.py | 4 +-- synapse/http/site.py | 4 +-- synapse/logging/context.py | 2 +- synapse/push/httppusher.py | 4 +-- synapse/push/push_rule_evaluator.py | 4 +-- synapse/replication/http/_base.py | 2 +- synapse/replication/http/membership.py | 2 +- synapse/replication/tcp/client.py | 2 +- synapse/replication/tcp/protocol.py | 2 +- synapse/rest/admin/__init__.py | 2 +- synapse/rest/client/v1/login.py | 2 +- synapse/rest/client/v2_alpha/account.py | 14 ++++----- synapse/rest/client/v2_alpha/register.py | 10 +++--- synapse/rest/client/v2_alpha/sync.py | 2 +- synapse/rest/media/v1/media_repository.py | 12 +++++--- synapse/rest/media/v1/preview_url_resource.py | 16 +++++----- synapse/rest/media/v1/thumbnail_resource.py | 4 +-- .../resource_limits_server_notices.py | 2 +- synapse/storage/_base.py | 6 ++-- synapse/storage/data_stores/main/pusher.py | 2 +- synapse/storage/data_stores/main/search.py | 2 +- synapse/util/async_helpers.py | 2 +- synapse/util/caches/__init__.py | 2 +- synapse/util/metrics.py | 6 ++-- synapse/util/rlimit.py | 2 +- 65 files changed, 164 insertions(+), 149 deletions(-) create mode 100644 changelog.d/6271.misc (limited to 'synapse/util/async_helpers.py') diff --git a/changelog.d/6271.misc 
b/changelog.d/6271.misc new file mode 100644 index 0000000000..2369760272 --- /dev/null +++ b/changelog.d/6271.misc @@ -0,0 +1 @@ +Replace every instance of `logger.warn` method with `logger.warning` as the former is deprecated. \ No newline at end of file diff --git a/scripts/move_remote_media_to_new_store.py b/scripts/move_remote_media_to_new_store.py index 12747c6024..b5b63933ab 100755 --- a/scripts/move_remote_media_to_new_store.py +++ b/scripts/move_remote_media_to_new_store.py @@ -72,7 +72,7 @@ def move_media(origin_server, file_id, src_paths, dest_paths): # check that the original exists original_file = src_paths.remote_media_filepath(origin_server, file_id) if not os.path.exists(original_file): - logger.warn( + logger.warning( "Original for %s/%s (%s) does not exist", origin_server, file_id, diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 54faed1e83..0d3321682c 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -157,7 +157,7 @@ class Store( ) except self.database_engine.module.DatabaseError as e: if self.database_engine.is_deadlock(e): - logger.warn("[TXN DEADLOCK] {%s} %d/%d", desc, i, N) + logger.warning("[TXN DEADLOCK] {%s} %d/%d", desc, i, N) if i < N: i += 1 conn.rollback() @@ -432,7 +432,7 @@ class Porter(object): for row in rows: d = dict(zip(headers, row)) if "\0" in d['value']: - logger.warn('dropping search row %s', d) + logger.warning('dropping search row %s', d) else: rows_dict.append(d) @@ -647,7 +647,7 @@ class Porter(object): if isinstance(col, bytes): return bytearray(col) elif isinstance(col, string_types) and "\0" in col: - logger.warn( + logger.warning( "DROPPING ROW: NUL value in table %s col %s: %r", table, headers[j], diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 53f3bb0fa8..5d0b7d2801 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -497,7 +497,7 @@ class Auth(object): token = self.get_access_token_from_request(request) service = self.store.get_app_service_by_token(token) if not service: - logger.warn("Unrecognised appservice access token.") + logger.warning("Unrecognised appservice access token.") raise InvalidClientTokenError() request.authenticated_entity = service.sender return defer.succeed(service) diff --git a/synapse/app/__init__.py b/synapse/app/__init__.py index d877c77834..a01bac2997 100644 --- a/synapse/app/__init__.py +++ b/synapse/app/__init__.py @@ -44,6 +44,8 @@ def check_bind_error(e, address, bind_addresses): bind_addresses (list): Addresses on which the service listens. """ if address == "0.0.0.0" and "::" in bind_addresses: - logger.warn("Failed to listen on 0.0.0.0, continuing because listening on [::]") + logger.warning( + "Failed to listen on 0.0.0.0, continuing because listening on [::]" + ) else: raise e diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index 767b87d2db..02b900f382 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -94,7 +94,7 @@ class AppserviceServer(HomeServer): ) elif listener["type"] == "metrics": if not self.get_config().enable_metrics: - logger.warn( + logger.warning( ( "Metrics listener configured, but " "enable_metrics is not True!" 
@@ -103,7 +103,7 @@ class AppserviceServer(HomeServer): else: _base.listen_metrics(listener["bind_addresses"], listener["port"]) else: - logger.warn("Unrecognized listener type: %s", listener["type"]) + logger.warning("Unrecognized listener type: %s", listener["type"]) self.get_tcp_replication().start_replication(self) diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index dbcc414c42..dadb487d5f 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -153,7 +153,7 @@ class ClientReaderServer(HomeServer): ) elif listener["type"] == "metrics": if not self.get_config().enable_metrics: - logger.warn( + logger.warning( ( "Metrics listener configured, but " "enable_metrics is not True!" @@ -162,7 +162,7 @@ class ClientReaderServer(HomeServer): else: _base.listen_metrics(listener["bind_addresses"], listener["port"]) else: - logger.warn("Unrecognized listener type: %s", listener["type"]) + logger.warning("Unrecognized listener type: %s", listener["type"]) self.get_tcp_replication().start_replication(self) diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index f20d810ece..d110599a35 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -147,7 +147,7 @@ class EventCreatorServer(HomeServer): ) elif listener["type"] == "metrics": if not self.get_config().enable_metrics: - logger.warn( + logger.warning( ( "Metrics listener configured, but " "enable_metrics is not True!" @@ -156,7 +156,7 @@ class EventCreatorServer(HomeServer): else: _base.listen_metrics(listener["bind_addresses"], listener["port"]) else: - logger.warn("Unrecognized listener type: %s", listener["type"]) + logger.warning("Unrecognized listener type: %s", listener["type"]) self.get_tcp_replication().start_replication(self) diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 1ef027a88c..418c086254 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -132,7 +132,7 @@ class FederationReaderServer(HomeServer): ) elif listener["type"] == "metrics": if not self.get_config().enable_metrics: - logger.warn( + logger.warning( ( "Metrics listener configured, but " "enable_metrics is not True!" @@ -141,7 +141,7 @@ class FederationReaderServer(HomeServer): else: _base.listen_metrics(listener["bind_addresses"], listener["port"]) else: - logger.warn("Unrecognized listener type: %s", listener["type"]) + logger.warning("Unrecognized listener type: %s", listener["type"]) self.get_tcp_replication().start_replication(self) diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index 04fbb407af..139221ad34 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -123,7 +123,7 @@ class FederationSenderServer(HomeServer): ) elif listener["type"] == "metrics": if not self.get_config().enable_metrics: - logger.warn( + logger.warning( ( "Metrics listener configured, but " "enable_metrics is not True!" 
@@ -132,7 +132,7 @@ class FederationSenderServer(HomeServer): else: _base.listen_metrics(listener["bind_addresses"], listener["port"]) else: - logger.warn("Unrecognized listener type: %s", listener["type"]) + logger.warning("Unrecognized listener type: %s", listener["type"]) self.get_tcp_replication().start_replication(self) diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 9504bfbc70..e647459d0e 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -204,7 +204,7 @@ class FrontendProxyServer(HomeServer): ) elif listener["type"] == "metrics": if not self.get_config().enable_metrics: - logger.warn( + logger.warning( ( "Metrics listener configured, but " "enable_metrics is not True!" @@ -213,7 +213,7 @@ class FrontendProxyServer(HomeServer): else: _base.listen_metrics(listener["bind_addresses"], listener["port"]) else: - logger.warn("Unrecognized listener type: %s", listener["type"]) + logger.warning("Unrecognized listener type: %s", listener["type"]) self.get_tcp_replication().start_replication(self) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index eb54f56853..8997c1f9e7 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -282,7 +282,7 @@ class SynapseHomeServer(HomeServer): reactor.addSystemEventTrigger("before", "shutdown", s.stopListening) elif listener["type"] == "metrics": if not self.get_config().enable_metrics: - logger.warn( + logger.warning( ( "Metrics listener configured, but " "enable_metrics is not True!" @@ -291,7 +291,7 @@ class SynapseHomeServer(HomeServer): else: _base.listen_metrics(listener["bind_addresses"], listener["port"]) else: - logger.warn("Unrecognized listener type: %s", listener["type"]) + logger.warning("Unrecognized listener type: %s", listener["type"]) def run_startup_checks(self, db_conn, database_engine): all_users_native = are_all_users_on_domain( @@ -569,7 +569,7 @@ def run(hs): hs.config.report_stats_endpoint, stats ) except Exception as e: - logger.warn("Error reporting stats: %s", e) + logger.warning("Error reporting stats: %s", e) def performance_stats_init(): try: diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index 6bc7202f33..2c6dd3ef02 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -120,7 +120,7 @@ class MediaRepositoryServer(HomeServer): ) elif listener["type"] == "metrics": if not self.get_config().enable_metrics: - logger.warn( + logger.warning( ( "Metrics listener configured, but " "enable_metrics is not True!" @@ -129,7 +129,7 @@ class MediaRepositoryServer(HomeServer): else: _base.listen_metrics(listener["bind_addresses"], listener["port"]) else: - logger.warn("Unrecognized listener type: %s", listener["type"]) + logger.warning("Unrecognized listener type: %s", listener["type"]) self.get_tcp_replication().start_replication(self) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index d84732ee3c..01a5ffc363 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -114,7 +114,7 @@ class PusherServer(HomeServer): ) elif listener["type"] == "metrics": if not self.get_config().enable_metrics: - logger.warn( + logger.warning( ( "Metrics listener configured, but " "enable_metrics is not True!" 
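Every hunk in this commit leans on the same stdlib detail: logging.Logger.warn is an undocumented, deprecated alias for Logger.warning, and on Python 3 it emits a DeprecationWarning when warnings are surfaced. A short sketch that makes the deprecation visible (illustrative, not from the patch):

    import logging
    import warnings

    logging.basicConfig(level=logging.WARNING)
    logger = logging.getLogger("demo")

    warnings.simplefilter("always", DeprecationWarning)
    logger.warn("deprecated alias")       # triggers a DeprecationWarning
    logger.warning("preferred spelling")  # the supported method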
@@ -123,7 +123,7 @@ class PusherServer(HomeServer): else: _base.listen_metrics(listener["bind_addresses"], listener["port"]) else: - logger.warn("Unrecognized listener type: %s", listener["type"]) + logger.warning("Unrecognized listener type: %s", listener["type"]) self.get_tcp_replication().start_replication(self) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 6a7e2fa707..b14da09f47 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -326,7 +326,7 @@ class SynchrotronServer(HomeServer): ) elif listener["type"] == "metrics": if not self.get_config().enable_metrics: - logger.warn( + logger.warning( ( "Metrics listener configured, but " "enable_metrics is not True!" @@ -335,7 +335,7 @@ class SynchrotronServer(HomeServer): else: _base.listen_metrics(listener["bind_addresses"], listener["port"]) else: - logger.warn("Unrecognized listener type: %s", listener["type"]) + logger.warning("Unrecognized listener type: %s", listener["type"]) self.get_tcp_replication().start_replication(self) diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index a5d6dc7915..6cb100319f 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -150,7 +150,7 @@ class UserDirectoryServer(HomeServer): ) elif listener["type"] == "metrics": if not self.get_config().enable_metrics: - logger.warn( + logger.warning( ( "Metrics listener configured, but " "enable_metrics is not True!" @@ -159,7 +159,7 @@ class UserDirectoryServer(HomeServer): else: _base.listen_metrics(listener["bind_addresses"], listener["port"]) else: - logger.warn("Unrecognized listener type: %s", listener["type"]) + logger.warning("Unrecognized listener type: %s", listener["type"]) self.get_tcp_replication().start_replication(self) diff --git a/synapse/config/key.py b/synapse/config/key.py index ec5d430afb..52ff1b2621 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -125,7 +125,7 @@ class KeyConfig(Config): # if neither trusted_key_servers nor perspectives are given, use the default. if "perspectives" not in config and "trusted_key_servers" not in config: - logger.warn(TRUSTED_KEY_SERVER_NOT_CONFIGURED_WARN) + logger.warning(TRUSTED_KEY_SERVER_NOT_CONFIGURED_WARN) key_servers = [{"server_name": "matrix.org"}] else: key_servers = config.get("trusted_key_servers", []) @@ -156,7 +156,7 @@ class KeyConfig(Config): if not self.macaroon_secret_key: # Unfortunately, there are people out there that don't have this # set. Lets just be "nice" and derive one from their secret key. 
- logger.warn("Config is missing macaroon_secret_key") + logger.warning("Config is missing macaroon_secret_key") seed = bytes(self.signing_key[0]) self.macaroon_secret_key = hashlib.sha256(seed).digest() diff --git a/synapse/config/logger.py b/synapse/config/logger.py index be92e33f93..2d2c1e54df 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -182,7 +182,7 @@ def _reload_stdlib_logging(*args, log_config=None): logger = logging.getLogger("") if not log_config: - logger.warn("Reloaded a blank config?") + logger.warning("Reloaded a blank config?") logging.config.dictConfig(log_config) diff --git a/synapse/event_auth.py b/synapse/event_auth.py index e7b722547b..ec3243b27b 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -77,7 +77,7 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru if auth_events is None: # Oh, we don't know what the state of the room was, so we # are trusting that this is allowed (at least for now) - logger.warn("Trusting event: %s", event.event_id) + logger.warning("Trusting event: %s", event.event_id) return if event.type == EventTypes.Create: diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 223aace0d9..0e22183280 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -102,7 +102,7 @@ class FederationBase(object): pass if not res: - logger.warn( + logger.warning( "Failed to find copy of %s with valid signature", pdu.event_id ) @@ -173,7 +173,7 @@ class FederationBase(object): return redacted_event if self.spam_checker.check_event_for_spam(pdu): - logger.warn( + logger.warning( "Event contains spam, redacting %s: %s", pdu.event_id, pdu.get_pdu_json(), @@ -185,7 +185,7 @@ class FederationBase(object): def errback(failure, pdu): failure.trap(SynapseError) with PreserveLoggingContext(ctx): - logger.warn( + logger.warning( "Signature check failed for %s: %s", pdu.event_id, failure.getErrorMessage(), diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index f5c1632916..595706d01a 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -522,12 +522,12 @@ class FederationClient(FederationBase): res = yield callback(destination) return res except InvalidResponseError as e: - logger.warn("Failed to %s via %s: %s", description, destination, e) + logger.warning("Failed to %s via %s: %s", description, destination, e) except HttpResponseException as e: if not 500 <= e.code < 600: raise e.to_synapse_error() else: - logger.warn( + logger.warning( "Failed to %s via %s: %i %s", description, destination, @@ -535,7 +535,9 @@ class FederationClient(FederationBase): e.args[0], ) except Exception: - logger.warn("Failed to %s via %s", description, destination, exc_info=1) + logger.warning( + "Failed to %s via %s", description, destination, exc_info=1 + ) raise SynapseError(502, "Failed to %s via any server" % (description,)) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index d5a19764d2..d942d77a72 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -220,7 +220,7 @@ class FederationServer(FederationBase): try: await self.check_server_matches_acl(origin_host, room_id) except AuthError as e: - logger.warn("Ignoring PDUs for room %s from banned server", room_id) + logger.warning("Ignoring PDUs for room %s from banned server", room_id) for pdu in 
pdus_by_room[room_id]: event_id = pdu.event_id pdu_results[event_id] = e.error_dict() @@ -233,7 +233,7 @@ class FederationServer(FederationBase): await self._handle_received_pdu(origin, pdu) pdu_results[event_id] = {} except FederationError as e: - logger.warn("Error handling PDU %s: %s", event_id, e) + logger.warning("Error handling PDU %s: %s", event_id, e) pdu_results[event_id] = {"error": str(e)} except Exception as e: f = failure.Failure() @@ -333,7 +333,9 @@ class FederationServer(FederationBase): room_version = await self.store.get_room_version(room_id) if room_version not in supported_versions: - logger.warn("Room version %s not in %s", room_version, supported_versions) + logger.warning( "Room version %s not in %s", room_version, supported_versions ) raise IncompatibleRoomVersionError(room_version=room_version) pdu = await self.handler.on_make_join_request(origin, room_id, user_id) @@ -679,7 +681,7 @@ def server_matches_acl_event(server_name, acl_event): # server name is a literal IP allow_ip_literals = acl_event.content.get("allow_ip_literals", True) if not isinstance(allow_ip_literals, bool): - logger.warn("Ignoring non-bool allow_ip_literals flag") + logger.warning("Ignoring non-bool allow_ip_literals flag") allow_ip_literals = True if not allow_ip_literals: # check for ipv6 literals. These start with '['. @@ -693,7 +695,7 @@ # next, check the deny list deny = acl_event.content.get("deny", []) if not isinstance(deny, (list, tuple)): - logger.warn("Ignoring non-list deny ACL %s", deny) + logger.warning("Ignoring non-list deny ACL %s", deny) deny = [] for e in deny: if _acl_entry_matches(server_name, e): @@ -703,7 +705,7 @@ # then the allow list.
allow = acl_event.content.get("allow", []) if not isinstance(allow, (list, tuple)): - logger.warn("Ignoring non-list allow ACL %s", allow) + logger.warning("Ignoring non-list allow ACL %s", allow) allow = [] for e in allow: if _acl_entry_matches(server_name, e): @@ -717,7 +719,7 @@ def server_matches_acl_event(server_name, acl_event): def _acl_entry_matches(server_name, acl_entry): if not isinstance(acl_entry, six.string_types): - logger.warn( + logger.warning( "Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry) ) return False @@ -772,7 +774,7 @@ class FederationHandlerRegistry(object): async def on_edu(self, edu_type, origin, content): handler = self.edu_handlers.get(edu_type) if not handler: - logger.warn("No handler registered for EDU type %s", edu_type) + logger.warning("No handler registered for EDU type %s", edu_type) with start_active_span_from_edu(content, "handle_edu"): try: @@ -785,7 +787,7 @@ def on_query(self, query_type, args): handler = self.query_handlers.get(query_type) if not handler: - logger.warn("No handler registered for query type %s", query_type) + logger.warning("No handler registered for query type %s", query_type) raise NotFoundError("No handler for Query type '%s'" % (query_type,)) return handler(args) diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py index 5b6c79c51a..67b3e1ab6e 100644 --- a/synapse/federation/sender/transaction_manager.py +++ b/synapse/federation/sender/transaction_manager.py @@ -146,7 +146,7 @@ class TransactionManager(object): if code == 200: for e_id, r in response.get("pdus", {}).items(): if "error" in r: - logger.warn( + logger.warning( "TX [%s] {%s} Remote returned error for %s: %s", destination, txn_id, @@ -155,7 +155,7 @@ ) else: for p in pdus: - logger.warn( + logger.warning( "TX [%s] {%s} Failed to send event %s", destination, txn_id, diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 0f16f21c2d..d6c23f22bd 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -202,7 +202,7 @@ def _parse_auth_header(header_bytes): sig = strip_quotes(param_dict["sig"]) return origin, key, sig except Exception as e: - logger.warn( + logger.warning( "Error parsing auth header '%s': %s", header_bytes.decode("ascii", "replace"), e, @@ -287,10 +287,12 @@ class BaseFederationServlet(object): except NoAuthenticationError: origin = None if self.REQUIRE_AUTH: - logger.warn("authenticate_request failed: missing authentication") + logger.warning( "authenticate_request failed: missing authentication" ) raise except Exception as e: - logger.warn("authenticate_request failed: %s", e) + logger.warning("authenticate_request failed: %s", e) raise request_tags = { diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index dfd7ae041b..d950a8b246 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -181,7 +181,7 @@ class GroupAttestionRenewer(object): elif not self.is_mine_id(user_id): destination = get_domain_from_id(user_id) else: - logger.warn( + logger.warning( "Incorrectly trying to do attestations for user: %r in %r", user_id, group_id, diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 8f10b6adbb..29e8ffc295 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -488,7 +488,7 @@ class
GroupsServerHandler(object): profile = yield self.profile_handler.get_profile_from_cache(user_id) user_profile.update(profile) except Exception as e: - logger.warn("Error getting profile for %s: %s", user_id, e) + logger.warning("Error getting profile for %s: %s", user_id, e) user_profiles.append(user_profile) return {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)} diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 333eb30625..7a0f54ca24 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -525,7 +525,7 @@ class AuthHandler(BaseHandler): result = None if not user_infos: - logger.warn("Attempted to login as %s but they do not exist", user_id) + logger.warning("Attempted to login as %s but they do not exist", user_id) elif len(user_infos) == 1: # a single match (possibly not exact) result = user_infos.popitem() @@ -534,7 +534,7 @@ class AuthHandler(BaseHandler): result = (user_id, user_infos[user_id]) else: # multiple matches, none of them exact - logger.warn( + logger.warning( "Attempted to login as %s but it matches more than one user " "inexactly: %r", user_id, @@ -728,7 +728,7 @@ class AuthHandler(BaseHandler): result = yield self.validate_hash(password, password_hash) if not result: - logger.warn("Failed password login for user %s", user_id) + logger.warning("Failed password login for user %s", user_id) return None return user_id diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 5f23ee4488..befef2cf3d 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -656,7 +656,7 @@ class DeviceListUpdater(object): except (NotRetryingDestination, RequestSendFailed, HttpResponseException): # TODO: Remember that we are now out of sync and try again # later - logger.warn("Failed to handle device list update for %s", user_id) + logger.warning("Failed to handle device list update for %s", user_id) # We abort on exceptions rather than accepting the update # as otherwise synapse will 'forget' that its device list # is out of date. If we bail then we will retry the resync @@ -694,7 +694,7 @@ class DeviceListUpdater(object): # up on storing the total list of devices and only handle the # delta instead. if len(devices) > 1000: - logger.warn( + logger.warning( "Ignoring device list snapshot for %s as it has >1K devs (%d)", user_id, len(devices), diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py index 0043cbea17..73b9e120f5 100644 --- a/synapse/handlers/devicemessage.py +++ b/synapse/handlers/devicemessage.py @@ -52,7 +52,7 @@ class DeviceMessageHandler(object): local_messages = {} sender_user_id = content["sender"] if origin != get_domain_from_id(sender_user_id): - logger.warn( + logger.warning( "Dropping device message from %r with spoofed sender %r", origin, sender_user_id, diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 08276fdebf..f1547e3039 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -181,7 +181,7 @@ class FederationHandler(BaseHandler): try: self._sanity_check_event(pdu) except SynapseError as err: - logger.warn( + logger.warning( "[%s %s] Received event failed sanity checks", room_id, event_id ) raise FederationError("ERROR", err.code, err.msg, affected=pdu.event_id) @@ -302,7 +302,7 @@ class FederationHandler(BaseHandler): # following. 
if sent_to_us_directly: - logger.warn( + logger.warning( "[%s %s] Rejecting: failed to fetch %d prev events: %s", room_id, event_id, @@ -406,7 +406,7 @@ class FederationHandler(BaseHandler): state = [event_map[e] for e in six.itervalues(state_map)] auth_chain = list(auth_chains) except Exception: - logger.warn( + logger.warning( "[%s %s] Error attempting to resolve state at missing " "prev_events", room_id, @@ -519,7 +519,9 @@ class FederationHandler(BaseHandler): # We failed to get the missing events, but since we need to handle # the case of `get_missing_events` not returning the necessary # events anyway, it is safe to simply log the error and continue. - logger.warn("[%s %s]: Failed to get prev_events: %s", room_id, event_id, e) + logger.warning( + "[%s %s]: Failed to get prev_events: %s", room_id, event_id, e + ) return logger.info( @@ -546,7 +548,7 @@ class FederationHandler(BaseHandler): yield self.on_receive_pdu(origin, ev, sent_to_us_directly=False) except FederationError as e: if e.code == 403: - logger.warn( + logger.warning( "[%s %s] Received prev_event %s failed history check.", room_id, event_id, @@ -1060,7 +1062,7 @@ class FederationHandler(BaseHandler): SynapseError if the event does not pass muster """ if len(ev.prev_event_ids()) > 20: - logger.warn( + logger.warning( "Rejecting event %s which has %i prev_events", ev.event_id, len(ev.prev_event_ids()), @@ -1068,7 +1070,7 @@ class FederationHandler(BaseHandler): raise SynapseError(http_client.BAD_REQUEST, "Too many prev_events") if len(ev.auth_event_ids()) > 10: - logger.warn( + logger.warning( "Rejecting event %s which has %i auth_events", ev.event_id, len(ev.auth_event_ids()), @@ -1204,7 +1206,7 @@ class FederationHandler(BaseHandler): with nested_logging_context(p.event_id): yield self.on_receive_pdu(origin, p, sent_to_us_directly=True) except Exception as e: - logger.warn( + logger.warning( "Error handling queued PDU %s from %s: %s", p.event_id, origin, e ) @@ -1251,7 +1253,7 @@ class FederationHandler(BaseHandler): builder=builder ) except AuthError as e: - logger.warn("Failed to create join to %s because %s", room_id, e) + logger.warning("Failed to create join to %s because %s", room_id, e) raise e event_allowed = yield self.third_party_event_rules.check_event_allowed( @@ -1495,7 +1497,7 @@ class FederationHandler(BaseHandler): room_version, event, context, do_sig_check=False ) except AuthError as e: - logger.warn("Failed to create new leave %r because %s", event, e) + logger.warning("Failed to create new leave %r because %s", event, e) raise e return event @@ -1789,7 +1791,7 @@ class FederationHandler(BaseHandler): # cause SynapseErrors in auth.check. We don't want to give up # the attempt to federate altogether in such cases. 
- logger.warn("Rejecting %s because %s", e.event_id, err.msg) + logger.warning("Rejecting %s because %s", e.event_id, err.msg) if e == event: raise @@ -1845,7 +1847,9 @@ class FederationHandler(BaseHandler): try: yield self.do_auth(origin, event, context, auth_events=auth_events) except AuthError as e: - logger.warn("[%s %s] Rejecting: %s", event.room_id, event.event_id, e.msg) + logger.warning( + "[%s %s] Rejecting: %s", event.room_id, event.event_id, e.msg + ) context.rejected = RejectedReason.AUTH_ERROR @@ -1939,7 +1943,7 @@ class FederationHandler(BaseHandler): try: event_auth.check(room_version, event, auth_events=current_auth_events) except AuthError as e: - logger.warn("Soft-failing %r because %s", event, e) + logger.warning("Soft-failing %r because %s", event, e) event.internal_metadata.soft_failed = True @defer.inlineCallbacks @@ -2038,7 +2042,7 @@ class FederationHandler(BaseHandler): try: event_auth.check(room_version, event, auth_events=auth_events) except AuthError as e: - logger.warn("Failed auth resolution for %r because %s", event, e) + logger.warning("Failed auth resolution for %r because %s", event, e) raise e @defer.inlineCallbacks @@ -2432,7 +2436,7 @@ class FederationHandler(BaseHandler): try: yield self.auth.check_from_context(room_version, event, context) except AuthError as e: - logger.warn("Denying new third party invite %r because %s", event, e) + logger.warning("Denying new third party invite %r because %s", event, e) raise e yield self._check_signature(event, context) @@ -2488,7 +2492,7 @@ class FederationHandler(BaseHandler): try: yield self.auth.check_from_context(room_version, event, context) except AuthError as e: - logger.warn("Denying third party invite %r because %s", event, e) + logger.warning("Denying third party invite %r because %s", event, e) raise e yield self._check_signature(event, context) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 46eb9ee88b..92fecbfc44 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -392,7 +392,7 @@ class GroupsLocalHandler(object): try: user_profile = yield self.profile_handler.get_profile(user_id) except Exception as e: - logger.warn("No profile for user %s: %s", user_id, e) + logger.warning("No profile for user %s: %s", user_id, e) user_profile = {} return {"state": "invite", "user_profile": user_profile} diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index ba99ddf76d..000fbf090f 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -272,7 +272,7 @@ class IdentityHandler(BaseHandler): changed = False if e.code in (400, 404, 501): # The remote server probably doesn't support unbinding (yet) - logger.warn("Received %d response while unbinding threepid", e.code) + logger.warning("Received %d response while unbinding threepid", e.code) else: logger.error("Failed to unbind threepid on identity server: %s", e) raise SynapseError(500, "Failed to contact identity server") @@ -403,7 +403,7 @@ class IdentityHandler(BaseHandler): if self.hs.config.using_identity_server_from_trusted_list: # Warn that a deprecated config option is in use - logger.warn( + logger.warning( 'The config option "trust_identity_server_for_password_resets" ' 'has been replaced by "account_threepid_delegate". 
' "Please consult the sample config at docs/sample_config.yaml for " @@ -457,7 +457,7 @@ class IdentityHandler(BaseHandler): if self.hs.config.using_identity_server_from_trusted_list: # Warn that a deprecated config option is in use - logger.warn( + logger.warning( 'The config option "trust_identity_server_for_password_resets" ' 'has been replaced by "account_threepid_delegate". ' "Please consult the sample config at docs/sample_config.yaml for " diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 7908a2d52c..5698e5fee0 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -688,7 +688,7 @@ class EventCreationHandler(object): try: yield self.auth.check_from_context(room_version, event, context) except AuthError as err: - logger.warn("Denying new event %r because %s", event, err) + logger.warning("Denying new event %r because %s", event, err) raise err # Ensure that we can round trip before trying to persist in db diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 8690f69d45..22e0a04da4 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -275,7 +275,7 @@ class BaseProfileHandler(BaseHandler): ratelimit=False, # Try to hide that these events aren't atomic. ) except Exception as e: - logger.warn( + logger.warning( "Failed to update join event for room %s - %s", room_id, str(e) ) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 2816bd8f87..445a08f445 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -922,7 +922,7 @@ class RoomEventSource(object): from_token = RoomStreamToken.parse(from_key) if from_token.topological: - logger.warn("Stream has topological part!!!! %r", from_key) + logger.warning("Stream has topological part!!!! %r", from_key) from_key = "s%s" % (from_token.stream,) app_service = self.store.get_app_service_by_user_id(user.to_string()) diff --git a/synapse/http/client.py b/synapse/http/client.py index cdf828a4ff..2df5b383b5 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -535,7 +535,7 @@ class SimpleHttpClient(object): b"Content-Length" in resp_headers and int(resp_headers[b"Content-Length"][0]) > max_size ): - logger.warn("Requested URL is too large > %r bytes" % (self.max_size,)) + logger.warning("Requested URL is too large > %r bytes" % (self.max_size,)) raise SynapseError( 502, "Requested file is too large > %r bytes" % (self.max_size,), @@ -543,7 +543,7 @@ class SimpleHttpClient(object): ) if response.code > 299: - logger.warn("Got %d when downloading %s" % (response.code, url)) + logger.warning("Got %d when downloading %s" % (response.code, url)) raise SynapseError(502, "Got error %d" % (response.code,), Codes.UNKNOWN) # TODO: if our Content-Type is HTML or something, just read the first diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py index 3fe4ffb9e5..021b233a7d 100644 --- a/synapse/http/federation/srv_resolver.py +++ b/synapse/http/federation/srv_resolver.py @@ -148,7 +148,7 @@ class SrvResolver(object): # Try something in the cache, else rereaise cache_entry = self._cache.get(service_name, None) if cache_entry: - logger.warn( + logger.warning( "Failed to resolve %r, falling back to cache. 
%r", service_name, e ) return list(cache_entry) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 3f7c93ffcb..691380abda 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -149,7 +149,7 @@ def _handle_json_response(reactor, timeout_sec, request, response): body = yield make_deferred_yieldable(d) except Exception as e: - logger.warn( + logger.warning( "{%s} [%s] Error reading response: %s", request.txn_id, request.destination, @@ -457,7 +457,7 @@ class MatrixFederationHttpClient(object): except Exception as e: # Eh, we're already going to raise an exception so lets # ignore if this fails. - logger.warn( + logger.warning( "{%s} [%s] Failed to get error response: %s %s: %s", request.txn_id, request.destination, @@ -478,7 +478,7 @@ class MatrixFederationHttpClient(object): break except RequestSendFailed as e: - logger.warn( + logger.warning( "{%s} [%s] Request failed: %s %s: %s", request.txn_id, request.destination, @@ -513,7 +513,7 @@ class MatrixFederationHttpClient(object): raise except Exception as e: - logger.warn( + logger.warning( "{%s} [%s] Request failed: %s %s: %s", request.txn_id, request.destination, @@ -889,7 +889,7 @@ class MatrixFederationHttpClient(object): d.addTimeout(self.default_timeout, self.reactor) length = yield make_deferred_yieldable(d) except Exception as e: - logger.warn( + logger.warning( "{%s} [%s] Error reading response: %s", request.txn_id, request.destination, diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 46af27c8f6..58f9cc61c8 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -170,7 +170,7 @@ class RequestMetrics(object): tag = context.tag if context != self.start_context: - logger.warn( + logger.warning( "Context have unexpectedly changed %r, %r", context, self.start_context, diff --git a/synapse/http/server.py b/synapse/http/server.py index 2ccb210fd6..943d12c907 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -454,7 +454,7 @@ def respond_with_json( # the Deferred fires, but since the flag is RIGHT THERE it seems like # a waste. if request._disconnected: - logger.warn( + logger.warning( "Not sending response to request %s, already disconnected.", request ) return diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 274c1a6a87..e9a5e46ced 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -219,13 +219,13 @@ def parse_json_value_from_request(request, allow_empty_body=False): try: content_unicode = content_bytes.decode("utf8") except UnicodeDecodeError: - logger.warn("Unable to decode UTF-8") + logger.warning("Unable to decode UTF-8") raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON) try: content = json.loads(content_unicode) except Exception as e: - logger.warn("Unable to parse JSON: %s", e) + logger.warning("Unable to parse JSON: %s", e) raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON) return content diff --git a/synapse/http/site.py b/synapse/http/site.py index df5274c177..ff8184a3d0 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -199,7 +199,7 @@ class SynapseRequest(Request): # It's useful to log it here so that we can get an idea of when # the client disconnects. 
with PreserveLoggingContext(self.logcontext): - logger.warn( + logger.warning( "Error processing request %r: %s %s", self, reason.type, reason.value ) @@ -305,7 +305,7 @@ class SynapseRequest(Request): try: self.request_metrics.stop(self.finish_time, self.code, self.sentLength) except Exception as e: - logger.warn("Failed to stop metrics: %r", e) + logger.warning("Failed to stop metrics: %r", e) class XForwardedForRequest(SynapseRequest): diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 370000e377..2c1fb9ddac 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -294,7 +294,7 @@ class LoggingContext(object): """Enters this logging context into thread local storage""" old_context = self.set_current_context(self) if self.previous_context != old_context: - logger.warn( + logger.warning( "Expected previous context %r, found %r", self.previous_context, old_context, diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 6299587808..23d3678420 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -246,7 +246,7 @@ class HttpPusher(object): # we really only give up so that if the URL gets # fixed, we don't suddenly deliver a load # of old notifications. - logger.warn( + logger.warning( "Giving up on a notification to user %s, " "pushkey %s", self.user_id, self.pushkey, @@ -299,7 +299,7 @@ class HttpPusher(object): if pk != self.pushkey: # for sanity, we only remove the pushkey if it # was the one we actually sent... - logger.warn( + logger.warning( ("Ignoring rejected pushkey %s because we" " didn't send it"), pk, ) diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 5ed9147de4..b1587183a8 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -117,7 +117,7 @@ class PushRuleEvaluatorForEvent(object): pattern = UserID.from_string(user_id).localpart if not pattern: - logger.warn("event_match condition with no pattern") + logger.warning("event_match condition with no pattern") return False # XXX: optimisation: cache our pattern regexps @@ -173,7 +173,7 @@ def _glob_matches(glob, value, word_boundary=False): regex_cache[(glob, word_boundary)] = r return r.search(value) except re.error: - logger.warn("Failed to parse glob to regex: %r", glob) + logger.warning("Failed to parse glob to regex: %r", glob) return False diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index 9be37cd998..c8056b0c0c 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -180,7 +180,7 @@ class ReplicationEndpoint(object): if e.code != 504 or not cls.RETRY_ON_TIMEOUT: raise - logger.warn("%s request timed out", cls.NAME) + logger.warning("%s request timed out", cls.NAME) # If we timed out we probably don't need to worry about backing # off too much, but lets just wait a little anyway. 
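The `logger.warn` to `logger.warning` sweep above is more than style: in the standard library, `Logger.warn` is a deprecated alias for `Logger.warning` (deprecated since Python 3.3) and emits a `DeprecationWarning` on each call once warnings are surfaced. A minimal stdlib-only sketch, separate from the patches above, that makes the warning visible:

    import logging
    import warnings

    logging.basicConfig()
    logger = logging.getLogger(__name__)

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        logger.warn("old spelling")     # deprecated alias of warning()
        logger.warning("new spelling")  # the canonical method

    # Expect one DeprecationWarning, along the lines of:
    #   "The 'warn' method is deprecated, use 'warning' instead"
    print([str(w.message) for w in caught])
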
diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index b5f5f13a62..cc1f249740 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -144,7 +144,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint): # The 'except' clause is very broad, but we need to # capture everything from DNS failures upwards # - logger.warn("Failed to reject invite: %s", e) + logger.warning("Failed to reject invite: %s", e) await self.store.locally_reject_invite(user_id, room_id) ret = {} diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index a44ceb00e7..563ce0fc53 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -168,7 +168,7 @@ class ReplicationClientHandler(object): if self.connection: self.connection.send_command(cmd) else: - logger.warn("Queuing command as not connected: %r", cmd.NAME) + logger.warning("Queuing command as not connected: %r", cmd.NAME) self.pending_commands.append(cmd) def send_federation_ack(self, token): diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 5ffdf2675d..b64f3f44b5 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -249,7 +249,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver): return handler(cmd) def close(self): - logger.warn("[%s] Closing connection", self.id()) + logger.warning("[%s] Closing connection", self.id()) self.time_we_closed = self.clock.time_msec() self.transport.loseConnection() self.on_connection_closed() diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 939418ee2b..5c2a2eb593 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -286,7 +286,7 @@ class PurgeHistoryRestServlet(RestServlet): room_id, stream_ordering ) if not r: - logger.warn( + logger.warning( "[purge] purging events not possible: No event found " "(received_ts %i => stream_ordering %i)", ts, diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 8414af08cb..39a5c5e9de 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -221,7 +221,7 @@ class LoginRestServlet(RestServlet): medium, address ) if not user_id: - logger.warn( + logger.warning( "unknown 3pid identifier medium %s, address %r", medium, address ) raise LoginError(403, "", errcode=Codes.FORBIDDEN) diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 80cf7126a0..332d7138b1 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -71,7 +71,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): def on_POST(self, request): if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF: if self.config.local_threepid_handling_disabled_due_to_email_config: - logger.warn( + logger.warning( "User password resets have been disabled due to lack of email config" ) raise SynapseError( @@ -162,7 +162,7 @@ class PasswordResetSubmitTokenServlet(RestServlet): ) if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF: if self.config.local_threepid_handling_disabled_due_to_email_config: - logger.warn( + logger.warning( "Password reset emails have been disabled due to lack of an email config" ) raise SynapseError( @@ -183,7 +183,7 @@ class PasswordResetSubmitTokenServlet(RestServlet): # Perform a 302 redirect if next_link is set if next_link: if 
next_link.startswith("file:///"): - logger.warn( + logger.warning( "Not redirecting to next_link as it is a local file: address" ) else: @@ -350,7 +350,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): def on_POST(self, request): if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF: if self.config.local_threepid_handling_disabled_due_to_email_config: - logger.warn( + logger.warning( "Adding emails have been disabled due to lack of an email config" ) raise SynapseError( @@ -441,7 +441,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE) if not self.hs.config.account_threepid_delegate_msisdn: - logger.warn( + logger.warning( "No upstream msisdn account_threepid_delegate configured on the server to " "handle this request" ) @@ -488,7 +488,7 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet): def on_GET(self, request): if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF: if self.config.local_threepid_handling_disabled_due_to_email_config: - logger.warn( + logger.warning( "Adding emails have been disabled due to lack of an email config" ) raise SynapseError( @@ -515,7 +515,7 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet): # Perform a 302 redirect if next_link is set if next_link: if next_link.startswith("file:///"): - logger.warn( + logger.warning( "Not redirecting to next_link as it is a local file: address" ) else: diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 4f24a124a6..6c7d25d411 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -106,7 +106,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): def on_POST(self, request): if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.OFF: if self.hs.config.local_threepid_handling_disabled_due_to_email_config: - logger.warn( + logger.warning( "Email registration has been disabled due to lack of email config" ) raise SynapseError( @@ -207,7 +207,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet): ) if not self.hs.config.account_threepid_delegate_msisdn: - logger.warn( + logger.warning( "No upstream msisdn account_threepid_delegate configured on the server to " "handle this request" ) @@ -266,7 +266,7 @@ class RegistrationSubmitTokenServlet(RestServlet): ) if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF: if self.config.local_threepid_handling_disabled_due_to_email_config: - logger.warn( + logger.warning( "User registration via email has been disabled due to lack of email config" ) raise SynapseError( @@ -287,7 +287,7 @@ class RegistrationSubmitTokenServlet(RestServlet): # Perform a 302 redirect if next_link is set if next_link: if next_link.startswith("file:///"): - logger.warn( + logger.warning( "Not redirecting to next_link as it is a local file: address" ) else: @@ -480,7 +480,7 @@ class RegisterRestServlet(RestServlet): # a password to work around a client bug where it sent # the 'initial_device_display_name' param alone, wiping out # the original registration params - logger.warn("Ignoring initial_device_display_name without password") + logger.warning("Ignoring initial_device_display_name without password") del body["initial_device_display_name"] session_id = self.auth_handler.get_session_id(body) diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 541a6b0e10..ccd8b17b23 100644 --- 
a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -394,7 +394,7 @@ class SyncRestServlet(RestServlet): # We've had bug reports that events were coming down under the # wrong room. if event.room_id != room.room_id: - logger.warn( + logger.warning( "Event %r is under room %r instead of %r", event.event_id, room.room_id, diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index b972e152a9..bd9186fe50 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -363,7 +363,7 @@ class MediaRepository(object): }, ) except RequestSendFailed as e: - logger.warn( + logger.warning( "Request failed fetching remote media %s/%s: %r", server_name, media_id, @@ -372,7 +372,7 @@ class MediaRepository(object): raise SynapseError(502, "Failed to fetch remote media") except HttpResponseException as e: - logger.warn( + logger.warning( "HTTP error fetching remote media %s/%s: %s", server_name, media_id, @@ -383,10 +383,12 @@ class MediaRepository(object): raise SynapseError(502, "Failed to fetch remote media") except SynapseError: - logger.warn("Failed to fetch remote media %s/%s", server_name, media_id) + logger.warning( + "Failed to fetch remote media %s/%s", server_name, media_id + ) raise except NotRetryingDestination: - logger.warn("Not retrying destination %r", server_name) + logger.warning("Not retrying destination %r", server_name) raise SynapseError(502, "Failed to fetch remote media") except Exception: logger.exception( @@ -691,7 +693,7 @@ class MediaRepository(object): try: os.remove(full_path) except OSError as e: - logger.warn("Failed to remove file: %r", full_path) + logger.warning("Failed to remove file: %r", full_path) if e.errno == errno.ENOENT: pass else: diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 094ebad770..5a25b6b3fc 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -136,7 +136,7 @@ class PreviewUrlResource(DirectServeResource): match = False continue if match: - logger.warn("URL %s blocked by url_blacklist entry %s", url, entry) + logger.warning("URL %s blocked by url_blacklist entry %s", url, entry) raise SynapseError( 403, "URL blocked by url pattern blacklist entry", Codes.UNKNOWN ) @@ -208,7 +208,7 @@ class PreviewUrlResource(DirectServeResource): og["og:image:width"] = dims["width"] og["og:image:height"] = dims["height"] else: - logger.warn("Couldn't get dims for %s" % url) + logger.warning("Couldn't get dims for %s" % url) # define our OG response for this media elif _is_html(media_info["media_type"]): @@ -256,7 +256,7 @@ class PreviewUrlResource(DirectServeResource): og["og:image:width"] = dims["width"] og["og:image:height"] = dims["height"] else: - logger.warn("Couldn't get dims for %s", og["og:image"]) + logger.warning("Couldn't get dims for %s", og["og:image"]) og["og:image"] = "mxc://%s/%s" % ( self.server_name, @@ -267,7 +267,7 @@ class PreviewUrlResource(DirectServeResource): else: del og["og:image"] else: - logger.warn("Failed to find any OG data in %s", url) + logger.warning("Failed to find any OG data in %s", url) og = {} logger.debug("Calculated OG for %s as %s", url, og) @@ -319,7 +319,7 @@ class PreviewUrlResource(DirectServeResource): ) except Exception as e: # FIXME: pass through 404s and other error messages nicely - logger.warn("Error downloading %s: %r", url, e) + logger.warning("Error downloading %s: %r", 
url, e) raise SynapseError( 500, @@ -400,7 +400,7 @@ class PreviewUrlResource(DirectServeResource): except OSError as e: # If the path doesn't exist, meh if e.errno != errno.ENOENT: - logger.warn("Failed to remove media: %r: %s", media_id, e) + logger.warning("Failed to remove media: %r: %s", media_id, e) continue removed_media.append(media_id) @@ -432,7 +432,7 @@ class PreviewUrlResource(DirectServeResource): except OSError as e: # If the path doesn't exist, meh if e.errno != errno.ENOENT: - logger.warn("Failed to remove media: %r: %s", media_id, e) + logger.warning("Failed to remove media: %r: %s", media_id, e) continue try: @@ -448,7 +448,7 @@ class PreviewUrlResource(DirectServeResource): except OSError as e: # If the path doesn't exist, meh if e.errno != errno.ENOENT: - logger.warn("Failed to remove media: %r: %s", media_id, e) + logger.warning("Failed to remove media: %r: %s", media_id, e) continue removed_media.append(media_id) diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py index 08329884ac..931ce79be8 100644 --- a/synapse/rest/media/v1/thumbnail_resource.py +++ b/synapse/rest/media/v1/thumbnail_resource.py @@ -182,7 +182,7 @@ class ThumbnailResource(DirectServeResource): if file_path: yield respond_with_file(request, desired_type, file_path) else: - logger.warn("Failed to generate thumbnail") + logger.warning("Failed to generate thumbnail") respond_404(request) @defer.inlineCallbacks @@ -245,7 +245,7 @@ class ThumbnailResource(DirectServeResource): if file_path: yield respond_with_file(request, desired_type, file_path) else: - logger.warn("Failed to generate thumbnail") + logger.warning("Failed to generate thumbnail") respond_404(request) @defer.inlineCallbacks diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index c0e7f475c9..9fae2e0afe 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -83,7 +83,7 @@ class ResourceLimitsServerNotices(object): room_id = yield self._server_notices_manager.get_notice_room_for_user(user_id) if not room_id: - logger.warn("Failed to get server notices room") + logger.warning("Failed to get server notices room") return yield self._check_and_set_tags(user_id, room_id) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index f5906fcd54..1a2b7ebe25 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -494,7 +494,7 @@ class SQLBaseStore(object): exception_callbacks = [] if LoggingContext.current_context() == LoggingContext.sentinel: - logger.warn("Starting db txn '%s' from sentinel context", desc) + logger.warning("Starting db txn '%s' from sentinel context", desc) try: result = yield self.runWithConnection( @@ -532,7 +532,7 @@ class SQLBaseStore(object): """ parent_context = LoggingContext.current_context() if parent_context == LoggingContext.sentinel: - logger.warn( + logger.warning( "Starting db connection from sentinel context: metrics will be lost" ) parent_context = None @@ -719,7 +719,7 @@ class SQLBaseStore(object): raise # presumably we raced with another transaction: let's retry. 
- logger.warn( + logger.warning( "IntegrityError when upserting into %s; retrying: %s", table, e ) diff --git a/synapse/storage/data_stores/main/pusher.py b/synapse/storage/data_stores/main/pusher.py index f005c1ae0a..d76861cdc0 100644 --- a/synapse/storage/data_stores/main/pusher.py +++ b/synapse/storage/data_stores/main/pusher.py @@ -44,7 +44,7 @@ class PusherWorkerStore(SQLBaseStore): r["data"] = json.loads(dataJson) except Exception as e: - logger.warn( + logger.warning( "Invalid JSON in data for pusher %d: %s, %s", r["id"], dataJson, diff --git a/synapse/storage/data_stores/main/search.py b/synapse/storage/data_stores/main/search.py index 0e08497452..a59b8331e1 100644 --- a/synapse/storage/data_stores/main/search.py +++ b/synapse/storage/data_stores/main/search.py @@ -196,7 +196,7 @@ class SearchBackgroundUpdateStore(BackgroundUpdateStore): " ON event_search USING GIN (vector)" ) except psycopg2.ProgrammingError as e: - logger.warn( + logger.warning( "Ignoring error %r when trying to switch from GIST to GIN", e ) diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index b60a604474..5c4de2e69f 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -309,7 +309,7 @@ class Linearizer(object): ) else: - logger.warn( + logger.warning( "Unexpected exception waiting for linearizer lock %r for key %r", self.name, key, diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 43fd65d693..da5077b471 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -107,7 +107,7 @@ def register_cache(cache_type, cache_name, cache, collect_callback=None): if collect_callback: collect_callback() except Exception as e: - logger.warn("Error calculating metrics for %s: %s", cache_name, e) + logger.warning("Error calculating metrics for %s: %s", cache_name, e) raise yield GaugeMetricFamily("__unused", "") diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 4b1bcdf23c..3286804322 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -119,7 +119,7 @@ class Measure(object): context = LoggingContext.current_context() if context != self.start_context: - logger.warn( + logger.warning( "Context has unexpectedly changed from '%s' to '%s'. (%r)", self.start_context, context, @@ -128,7 +128,7 @@ class Measure(object): return if not context: - logger.warn("Expected context. (%r)", self.name) + logger.warning("Expected context. (%r)", self.name) return current = context.get_resource_usage() @@ -140,7 +140,7 @@ class Measure(object): block_db_txn_duration.labels(self.name).inc(usage.db_txn_duration_sec) block_db_sched_duration.labels(self.name).inc(usage.db_sched_duration_sec) except ValueError: - logger.warn( + logger.warning( "Failed to save metrics! 
OLD: %r, NEW: %r", self.start_usage, current ) diff --git a/synapse/util/rlimit.py b/synapse/util/rlimit.py index 6c0f2bb0cf..207cd17c2a 100644 --- a/synapse/util/rlimit.py +++ b/synapse/util/rlimit.py @@ -33,4 +33,4 @@ def change_resource_limit(soft_file_no): resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY) ) except (ValueError, resource.error) as e: - logger.warn("Failed to set file or core limit: %s", e) + logger.warning("Failed to set file or core limit: %s", e) -- cgit 1.5.1 From bc7de87650c2646cdc388f06a2a5bb6f94fc5c5c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 16 Dec 2019 12:26:28 +0000 Subject: Persist auth/state events at backwards extremities when we fetch them (#6526) The main point here is to make sure that the state returned by _get_state_in_room has been authed before we try to use it as state in the room. --- changelog.d/6526.bugfix | 1 + synapse/handlers/federation.py | 247 +++++++++++++---------------------------- synapse/util/async_helpers.py | 4 +- 3 files changed, 83 insertions(+), 169 deletions(-) create mode 100644 changelog.d/6526.bugfix (limited to 'synapse/util/async_helpers.py') diff --git a/changelog.d/6526.bugfix b/changelog.d/6526.bugfix new file mode 100644 index 0000000000..53214b0748 --- /dev/null +++ b/changelog.d/6526.bugfix @@ -0,0 +1 @@ +Fix a bug which could cause the federation server to incorrectly return errors when handling certain obscure event graphs. \ No newline at end of file diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 1d39a9a4f5..3f480f2056 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -65,8 +65,7 @@ from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRes from synapse.state import StateResolutionStore, resolve_events_with_store from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour from synapse.types import UserID, get_domain_from_id -from synapse.util import batch_iter, unwrapFirstError -from synapse.util.async_helpers import Linearizer +from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.distributor import user_joined_room from synapse.util.retryutils import NotRetryingDestination from synapse.visibility import filter_events_for_server @@ -238,7 +237,6 @@ class FederationHandler(BaseHandler): return None state = None - auth_chain = [] # Get missing pdus if necessary. if not pdu.internal_metadata.is_outlier(): @@ -348,7 +346,6 @@ class FederationHandler(BaseHandler): # Calculate the state after each of the previous events, and # resolve them to find the correct state at the current event. - auth_chains = set() event_map = {event_id: pdu} try: # Get the state of the events we know about @@ -369,24 +366,14 @@ class FederationHandler(BaseHandler): "Requesting state at missing prev_event %s", event_id, ) - room_version = await self.store.get_room_version(room_id) - with nested_logging_context(p): # note that if any of the missing prevs share missing state or # auth events, the requests to fetch those events are deduped # by the get_pdu_cache in federation_client. - ( - remote_state, - got_auth_chain, - ) = await self._get_state_for_room( + (remote_state, _,) = await self._get_state_for_room( origin, room_id, p, include_event_in_state=True ) - # XXX hrm I'm not convinced that duplicate events will compare - # for equality, so I'm not sure this does what the author - # hoped. 
- auth_chains.update(got_auth_chain) - remote_state_map = { (x.type, x.state_key): x.event_id for x in remote_state } @@ -395,6 +382,7 @@ class FederationHandler(BaseHandler): for x in remote_state: event_map[x.event_id] = x + room_version = await self.store.get_room_version(room_id) state_map = await resolve_events_with_store( room_id, room_version, @@ -416,7 +404,6 @@ class FederationHandler(BaseHandler): event_map.update(evs) state = [event_map[e] for e in six.itervalues(state_map)] - auth_chain = list(auth_chains) except Exception: logger.warning( "[%s %s] Error attempting to resolve state at missing " @@ -432,9 +419,7 @@ class FederationHandler(BaseHandler): affected=event_id, ) - await self._process_received_pdu( - origin, pdu, state=state, auth_chain=auth_chain - ) + await self._process_received_pdu(origin, pdu, state=state) async def _get_missing_events_for_pdu(self, origin, pdu, prevs, min_depth): """ @@ -633,10 +618,7 @@ class FederationHandler(BaseHandler): ) -> Dict[str, EventBase]: """Fetch events from a remote destination, checking if we already have them. - Args: - destination - room_id - event_ids + Persists any events we don't already have as outliers. If we fail to fetch any of the events, a warning will be logged, and the event will be omitted from the result. Likewise, any events which turn out not to @@ -656,27 +638,15 @@ class FederationHandler(BaseHandler): room_id, ) - room_version = await self.store.get_room_version(room_id) - - # XXX 20 requests at once? really? - for batch in batch_iter(missing_events, 20): - deferreds = [ - run_in_background( - self.federation_client.get_pdu, - destinations=[destination], - event_id=e_id, - room_version=room_version, - ) - for e_id in batch - ] - - res = await make_deferred_yieldable( - defer.DeferredList(deferreds, consumeErrors=True) - ) + await self._get_events_and_persist( + destination=destination, room_id=room_id, events=missing_events + ) - for success, result in res: - if success and result: - fetched_events[result.event_id] = result + # we need to make sure we re-load from the database to get the rejected + # state correct. + fetched_events.update( + (await self.store.get_events(missing_events, allow_rejected=True)) + ) # check for events which were in the wrong room. # @@ -705,50 +675,26 @@ class FederationHandler(BaseHandler): return fetched_events - async def _process_received_pdu(self, origin, event, state, auth_chain): + async def _process_received_pdu( + self, origin: str, event: EventBase, state: Optional[Iterable[EventBase]], + ): """ Called when we have a new pdu. We need to do auth checks and put it through the StateHandler. + + Args: + origin: server sending the event + + event: event to be persisted + + state: Normally None, but if we are handling a gap in the graph + (ie, we are missing one or more prev_events), the resolved state at the + event """ room_id = event.room_id event_id = event.event_id logger.debug("[%s %s] Processing event: %s", room_id, event_id, event) - event_ids = set() - if state: - event_ids |= {e.event_id for e in state} - if auth_chain: - event_ids |= {e.event_id for e in auth_chain} - - seen_ids = await self.store.have_seen_events(event_ids) - - if state and auth_chain is not None: - # If we have any state or auth_chain given to us by the replication - # layer, then we should handle them (if we haven't before.) 
- - event_infos = [] - - for e in itertools.chain(auth_chain, state): - if e.event_id in seen_ids: - continue - e.internal_metadata.outlier = True - auth_ids = e.auth_event_ids() - auth = { - (e.type, e.state_key): e - for e in auth_chain - if e.event_id in auth_ids or e.type == EventTypes.Create - } - event_infos.append(_NewEventInfo(event=e, auth_events=auth)) - seen_ids.add(e.event_id) - - logger.info( - "[%s %s] persisting newly-received auth/state events %s", - room_id, - event_id, - [e.event.event_id for e in event_infos], - ) - await self._handle_new_events(origin, event_infos) - try: context = await self._handle_new_event(origin, event, state=state) except AuthError as e: @@ -803,8 +749,6 @@ class FederationHandler(BaseHandler): if dest == self.server_name: raise SynapseError(400, "Can't backfill from self.") - room_version = await self.store.get_room_version(room_id) - events = await self.federation_client.backfill( dest, room_id, limit=limit, extremities=extremities ) @@ -833,6 +777,9 @@ class FederationHandler(BaseHandler): event_ids = set(e.event_id for e in events) + # build a list of events whose prev_events weren't in the batch. + # (XXX: this will include events whose prev_events we already have; that doesn't + # sound right?) edges = [ev.event_id for ev in events if set(ev.prev_event_ids()) - event_ids] logger.info("backfill: Got %d events with %d edges", len(events), len(edges)) @@ -861,95 +808,11 @@ class FederationHandler(BaseHandler): auth_events.update( {e_id: event_map[e_id] for e_id in required_auth if e_id in event_map} ) - missing_auth = required_auth - set(auth_events) - failed_to_fetch = set() - - # Try and fetch any missing auth events from both DB and remote servers. - # We repeatedly do this until we stop finding new auth events. - while missing_auth - failed_to_fetch: - logger.info("Missing auth for backfill: %r", missing_auth) - ret_events = await self.store.get_events(missing_auth - failed_to_fetch) - auth_events.update(ret_events) - - required_auth.update( - a_id for event in ret_events.values() for a_id in event.auth_event_ids() - ) - missing_auth = required_auth - set(auth_events) - - if missing_auth - failed_to_fetch: - logger.info( - "Fetching missing auth for backfill: %r", - missing_auth - failed_to_fetch, - ) - - results = await make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background( - self.federation_client.get_pdu, - [dest], - event_id, - room_version=room_version, - outlier=True, - timeout=10000, - ) - for event_id in missing_auth - failed_to_fetch - ], - consumeErrors=True, - ) - ).addErrback(unwrapFirstError) - auth_events.update({a.event_id: a for a in results if a}) - required_auth.update( - a_id - for event in results - if event - for a_id in event.auth_event_ids() - ) - missing_auth = required_auth - set(auth_events) - - failed_to_fetch = missing_auth - set(auth_events) - - seen_events = await self.store.have_seen_events( - set(auth_events.keys()) | set(state_events.keys()) - ) - - # We now have a chunk of events plus associated state and auth chain to - # persist. We do the persistence in two steps: - # 1. Auth events and state get persisted as outliers, plus the - # backward extremities get persisted (as non-outliers). - # 2. The rest of the events in the chunk get persisted one by one, as - # each one depends on the previous event for its state. - # - # The important thing is that events in the chunk get persisted as - # non-outliers, including when those events are also in the state or - # auth chain. 
Caution must therefore be taken to ensure that they are - # not accidentally marked as outliers. - # Step 1a: persist auth events that *don't* appear in the chunk ev_infos = [] - for a in auth_events.values(): - # We only want to persist auth events as outliers that we haven't - # seen and aren't about to persist as part of the backfilled chunk. - if a.event_id in seen_events or a.event_id in event_map: - continue - a.internal_metadata.outlier = True - ev_infos.append( - _NewEventInfo( - event=a, - auth_events={ - ( - auth_events[a_id].type, - auth_events[a_id].state_key, - ): auth_events[a_id] - for a_id in a.auth_event_ids() - if a_id in auth_events - }, - ) - ) - - # Step 1b: persist the events in the chunk we fetched state for (i.e. - # the backwards extremities) as non-outliers. + # Step 1: persist the events in the chunk we fetched state for (i.e. + # the backwards extremities), with custom auth events and state for e_id in events_to_state: # For paranoia we ensure that these events are marked as # non-outliers @@ -1191,6 +1054,56 @@ class FederationHandler(BaseHandler): return False + async def _get_events_and_persist( + self, destination: str, room_id: str, events: Iterable[str] + ): + """Fetch the given events from a server, and persist them as outliers. + + Logs a warning if we can't find the given event. + """ + + room_version = await self.store.get_room_version(room_id) + + event_infos = [] + + async def get_event(event_id: str): + with nested_logging_context(event_id): + try: + event = await self.federation_client.get_pdu( + [destination], event_id, room_version, outlier=True, + ) + if event is None: + logger.warning( + "Server %s didn't return event %s", destination, event_id, + ) + return + + # recursively fetch the auth events for this event + auth_events = await self._get_events_from_store_or_dest( + destination, room_id, event.auth_event_ids() + ) + auth = {} + for auth_event_id in event.auth_event_ids(): + ae = auth_events.get(auth_event_id) + if ae: + auth[(ae.type, ae.state_key)] = ae + + event_infos.append(_NewEventInfo(event, None, auth)) + + except Exception as e: + logger.warning( + "Error fetching missing state/auth event %s: %s %s", + event_id, + type(e), + e, + ) + + await concurrently_execute(get_event, events, 5) + + await self._handle_new_events( + destination, event_infos, + ) + def _sanity_check_event(self, ev): """ Do some early sanity checks of a received event diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 5c4de2e69f..04b6abdc24 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -140,8 +140,8 @@ def concurrently_execute(func, args, limit): Args: func (func): Function to execute, should return a deferred or coroutine. - args (list): List of arguments to pass to func, each invocation of func - gets a signle argument. + args (Iterable): List of arguments to pass to func, each invocation of func + gets a single argument. limit (int): Maximum number of conccurent executions. 
Returns: -- cgit 1.5.1 From ae5b3104f0023171b2bb89f08a066e5974ee7666 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Feb 2020 17:10:54 +0000 Subject: Fix stacktraces when using ObservableDeferred and async/await (#6836) --- changelog.d/6836.misc | 1 + synapse/util/async_helpers.py | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 changelog.d/6836.misc (limited to 'synapse/util/async_helpers.py') diff --git a/changelog.d/6836.misc b/changelog.d/6836.misc new file mode 100644 index 0000000000..232488e1e5 --- /dev/null +++ b/changelog.d/6836.misc @@ -0,0 +1 @@ +Fix stacktraces when using `ObservableDeferred` and async/await. diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 04b6abdc24..581dffd8a0 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -73,6 +73,10 @@ class ObservableDeferred(object): def errback(f): object.__setattr__(self, "_result", (False, f)) while self._observers: + # This is a little bit of magic to correctly propagate stack + # traces when we `await` on one of the observer deferreds. + f.value.__failure__ = f + try: # TODO: Handle errors here. self._observers.pop().errback(f) -- cgit 1.5.1
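The four-line fix in the last patch deserves a gloss: on recent Twisted versions, when a failed Deferred is `await`ed, the stored exception is re-raised and later re-wrapped in a fresh `Failure`, and the original traceback survives only if the exception carries a `__failure__` attribute pointing back at the original `Failure`. The patch stashes exactly that before notifying observers. A toy reproduction of the pattern — the class and names are illustrative only, assuming a plain Twisted install and that this attribute convention holds on the installed version:

    from twisted.internet import defer, task
    from twisted.python.failure import Failure


    class MiniObservable:
        """Toy analogue of ObservableDeferred's errback path (illustrative)."""

        def __init__(self, deferred):
            self._observers = []
            deferred.addErrback(self._errback)

        def observe(self):
            d = defer.Deferred()
            self._observers.append(d)
            return d

        def _errback(self, f):
            # The trick from the patch: stash the Failure on the exception so
            # that, when an `await` re-raises and re-wraps it, the original
            # traceback can be recovered (an assumption about Twisted's
            # Failure machinery, mirrored from the commit above).
            f.value.__failure__ = f
            for d in self._observers:
                d.errback(f)


    async def main(reactor):
        source = defer.Deferred()
        observable = MiniObservable(source)
        waiter = observable.observe()

        try:
            raise ValueError("boom")  # the original raise site
        except ValueError:
            source.errback(Failure())

        try:
            await waiter
        except ValueError:
            # On Twisted versions that honour __failure__, re-wrapping here
            # should keep the original raise site in the traceback.
            print(Failure().getTraceback())


    if __name__ == "__main__":
        task.react(lambda reactor: defer.ensureDeferred(main(reactor)))
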
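Further up, the docstring fix in the #6526 patch pins down the contract of `concurrently_execute`: invoke `func` once per argument, with at most `limit` invocations in flight, by having `limit` workers drain a shared iterator. A self-contained sketch of that worker-pool pattern in plain Twisted — the helper and job names here are illustrative, not Synapse's logcontext-aware implementation:

    from twisted.internet import defer, task


    def concurrently_execute_sketch(func, args, limit):
        """Run func over args with at most `limit` invocations in flight."""
        it = iter(args)  # shared iterator: each worker pulls the next argument

        async def worker():
            for arg in it:
                await func(arg)

        return defer.gatherResults(
            [defer.ensureDeferred(worker()) for _ in range(limit)],
            consumeErrors=True,
        )


    async def main(reactor):
        async def job(n):
            # stand-in for real I/O; at most three jobs run at once
            await task.deferLater(reactor, 0.01, lambda: None)
            print("finished", n)

        await concurrently_execute_sketch(job, range(10), limit=3)


    if __name__ == "__main__":
        task.react(lambda reactor: defer.ensureDeferred(main(reactor)))

Because each worker simply pulls the next argument when its previous call finishes, the pool stays fully busy without any explicit queue or semaphore; errors surface through `gatherResults(consumeErrors=True)`, much as the real helper unwraps the first error for its caller.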