author     Brendan Abolivier <babolivier@matrix.org>  2019-07-08 15:49:23 +0100
committer  Brendan Abolivier <babolivier@matrix.org>  2019-07-08 15:49:23 +0100
commit     57eacee4f4cff3a663a6e22834a82f9c5783a56d (patch)
tree       b5714f78b8c129ad8aca1689e2d7ddecb20c3bed /synapse/util
parent     Changelog (diff)
parent     Update ModuleApi to avoid register(generate_token=True) (#5640) (diff)
download   synapse-57eacee4f4cff3a663a6e22834a82f9c5783a56d.tar.xz
Merge branch 'develop' into babolivier/invite-json
Diffstat (limited to 'synapse/util')
-rw-r--r--  synapse/util/__init__.py                16
-rw-r--r--  synapse/util/async_helpers.py            9
-rw-r--r--  synapse/util/caches/descriptors.py      11
-rw-r--r--  synapse/util/caches/response_cache.py    4
-rw-r--r--  synapse/util/distributor.py              2
-rw-r--r--  synapse/util/file_consumer.py            2
-rw-r--r--  synapse/util/logcontext.py             688
-rw-r--r--  synapse/util/logformatter.py            44
-rw-r--r--  synapse/util/logutils.py               194
-rw-r--r--  synapse/util/metrics.py                  2
-rw-r--r--  synapse/util/ratelimitutils.py          18
-rw-r--r--  synapse/util/retryutils.py               4
12 files changed, 67 insertions(+), 927 deletions(-)
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index dcc747cac1..f506b2a695 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -21,7 +21,7 @@ import attr
 
 from twisted.internet import defer, task
 
-from synapse.util.logcontext import PreserveLoggingContext
+from synapse.logging import context
 
 logger = logging.getLogger(__name__)
 
@@ -46,7 +46,7 @@ class Clock(object):
     @defer.inlineCallbacks
     def sleep(self, seconds):
         d = defer.Deferred()
-        with PreserveLoggingContext():
+        with context.PreserveLoggingContext():
             self._reactor.callLater(seconds, d.callback, seconds)
             res = yield d
         defer.returnValue(res)
@@ -62,7 +62,10 @@ class Clock(object):
     def looping_call(self, f, msec):
         """Call a function repeatedly.
 
-         Waits `msec` initially before calling `f` for the first time.
+        Waits `msec` initially before calling `f` for the first time.
+
+        Note that the function will be called with no logcontext, so if it is anything
+        other than trivial, you probably want to wrap it in run_as_background_process.
 
         Args:
             f(function): The function to call repeatedly.
@@ -77,6 +80,9 @@ class Clock(object):
     def call_later(self, delay, callback, *args, **kwargs):
         """Call something later
 
+        Note that the function will be called with no logcontext, so if it is anything
+        other than trivial, you probably want to wrap it in run_as_background_process.
+
         Args:
             delay(float): How long to wait in seconds.
             callback(function): Function to call
@@ -85,10 +91,10 @@ class Clock(object):
         """
 
         def wrapped_callback(*args, **kwargs):
-            with PreserveLoggingContext():
+            with context.PreserveLoggingContext():
                 callback(*args, **kwargs)
 
-        with PreserveLoggingContext():
+        with context.PreserveLoggingContext():
             return self._reactor.callLater(delay, wrapped_callback, *args, **kwargs)
 
     def cancel_call_later(self, timer, ignore_errs=False):
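
A note on the two docstring additions above: the reactor fires these callbacks outside any logcontext, so anything non-trivial should be wrapped. A minimal sketch of that pattern, assuming `clock` is an instance of the Clock class being patched here and using run_as_background_process (whose import appears later in this diff); the helper names are illustrative:

    from synapse.metrics.background_process_metrics import run_as_background_process

    def _prune_caches():
        # the actual work, which may log and touch the database
        ...

    def _prune_caches_wrapper():
        # give the work its own logcontext so its log lines and CPU/db
        # usage are attributed somewhere sensible
        return run_as_background_process("prune_caches", _prune_caches)

    # the reactor invokes the wrapper with no logcontext, once a minute
    clock.looping_call(_prune_caches_wrapper, 60 * 1000)
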
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index 7757b8708a..58a6b8764f 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -23,13 +23,12 @@ from twisted.internet import defer
 from twisted.internet.defer import CancelledError
 from twisted.python import failure
 
-from synapse.util import Clock, logcontext, unwrapFirstError
-
-from .logcontext import (
+from synapse.logging.context import (
     PreserveLoggingContext,
     make_deferred_yieldable,
     run_in_background,
 )
+from synapse.util import Clock, unwrapFirstError
 
 logger = logging.getLogger(__name__)
 
@@ -153,7 +152,7 @@ def concurrently_execute(func, args, limit):
         except StopIteration:
             pass
 
-    return logcontext.make_deferred_yieldable(
+    return make_deferred_yieldable(
         defer.gatherResults(
             [run_in_background(_concurrently_execute_inner) for _ in range(limit)],
             consumeErrors=True,
@@ -174,7 +173,7 @@ def yieldable_gather_results(func, iter, *args, **kwargs):
         Deferred[list]: Resolved when all functions have been invoked, or errors if
         one of the function calls fails.
     """
-    return logcontext.make_deferred_yieldable(
+    return make_deferred_yieldable(
         defer.gatherResults(
             [run_in_background(func, item, *args, **kwargs) for item in iter],
             consumeErrors=True,
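
Both call sites above follow the same shape: start each task with run_in_background (which restores the caller's logcontext as soon as the call returns control), gather the results, and make the combined deferred safe to yield on. A condensed sketch of that pattern, with an illustrative function name:

    from twisted.internet import defer

    from synapse.logging.context import make_deferred_yieldable, run_in_background

    def gather(func, items):
        # start one background task per item
        deferreds = [run_in_background(func, item) for item in items]

        # defer.gatherResults does not follow the synapse logcontext rules,
        # so wrap the combined deferred before handing it back to callers
        return make_deferred_yieldable(
            defer.gatherResults(deferreds, consumeErrors=True)
        )
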
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index d2f25063aa..675db2f448 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -24,7 +24,8 @@ from six import itervalues, string_types
 
 from twisted.internet import defer
 
-from synapse.util import logcontext, unwrapFirstError
+from synapse.logging.context import make_deferred_yieldable, preserve_fn
+from synapse.util import unwrapFirstError
 from synapse.util.async_helpers import ObservableDeferred
 from synapse.util.caches import get_cache_factor_for
 from synapse.util.caches.lrucache import LruCache
@@ -388,7 +389,7 @@ class CacheDescriptor(_CacheDescriptorBase):
 
             except KeyError:
                 ret = defer.maybeDeferred(
-                    logcontext.preserve_fn(self.function_to_call), obj, *args, **kwargs
+                    preserve_fn(self.function_to_call), obj, *args, **kwargs
                 )
 
                 def onErr(f):
@@ -408,7 +409,7 @@ class CacheDescriptor(_CacheDescriptorBase):
                 observer = result_d.observe()
 
             if isinstance(observer, defer.Deferred):
-                return logcontext.make_deferred_yieldable(observer)
+                return make_deferred_yieldable(observer)
             else:
                 return observer
 
@@ -563,7 +564,7 @@ class CacheListDescriptor(_CacheDescriptorBase):
 
                 cached_defers.append(
                     defer.maybeDeferred(
-                        logcontext.preserve_fn(self.function_to_call), **args_to_call
+                        preserve_fn(self.function_to_call), **args_to_call
                     ).addCallbacks(complete_all, errback)
                 )
 
@@ -571,7 +572,7 @@ class CacheListDescriptor(_CacheDescriptorBase):
                 d = defer.gatherResults(cached_defers, consumeErrors=True).addCallbacks(
                     lambda _: results, unwrapFirstError
                 )
-                return logcontext.make_deferred_yieldable(d)
+                return make_deferred_yieldable(d)
             else:
                 return results
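
The descriptors now call the wrapped function through a bare preserve_fn import. preserve_fn is simply the decorator form of run_in_background (see its definition further down this diff), so the following two calls are interchangeable; fetch_thing is an illustrative stand-in:

    from synapse.logging.context import preserve_fn, run_in_background

    def fetch_thing(key):
        ...

    d1 = run_in_background(fetch_thing, "some-key")  # call-site form
    d2 = preserve_fn(fetch_thing)("some-key")        # decorator form
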
 
diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py
index cbe54d45dd..d6908e169d 100644
--- a/synapse/util/caches/response_cache.py
+++ b/synapse/util/caches/response_cache.py
@@ -16,9 +16,9 @@ import logging
 
 from twisted.internet import defer
 
+from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.util.async_helpers import ObservableDeferred
 from synapse.util.caches import register_cache
-from synapse.util.logcontext import make_deferred_yieldable, run_in_background
 
 logger = logging.getLogger(__name__)
 
@@ -78,7 +78,7 @@ class ResponseCache(object):
 
         *deferred* should run its callbacks in the sentinel logcontext (ie,
         you should wrap normal synapse deferreds with
-        logcontext.run_in_background).
+        synapse.logging.context.run_in_background).
 
         Can return either a new Deferred (which also doesn't follow the synapse
         logcontext rules), or, if *deferred* was already complete, the actual
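
The amended docstring asks callers to hand over a deferred that already runs its callbacks in the sentinel logcontext. A hedged sketch of the calling side (the `set` method name and the helper are assumptions, not shown in this hunk):

    from synapse.logging.context import run_in_background

    def handle(cache, key):
        def do_work():
            ...  # compute the response

        # run_in_background starts do_work under the current logcontext and
        # returns a deferred whose callbacks run in the sentinel context,
        # which is what the docstring above requires
        return cache.set(key, run_in_background(do_work))
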
diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py
index 5a79db821c..45af8d3eeb 100644
--- a/synapse/util/distributor.py
+++ b/synapse/util/distributor.py
@@ -17,8 +17,8 @@ import logging
 
 from twisted.internet import defer
 
+from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.util.logcontext import make_deferred_yieldable, run_in_background
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py
index 629ed44149..8b17d1c8b8 100644
--- a/synapse/util/file_consumer.py
+++ b/synapse/util/file_consumer.py
@@ -17,7 +17,7 @@ from six.moves import queue
 
 from twisted.internet import threads
 
-from synapse.util.logcontext import make_deferred_yieldable, run_in_background
+from synapse.logging.context import make_deferred_yieldable, run_in_background
 
 
 class BackgroundFileConsumer(object):
diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py
index 6b0d2deea0..40e5c10a49 100644
--- a/synapse/util/logcontext.py
+++ b/synapse/util/logcontext.py
@@ -1,4 +1,4 @@
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,668 +12,28 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-""" Thread-local-alike tracking of log contexts within synapse
-
-This module provides objects and utilities for tracking contexts through
-synapse code, so that log lines can include a request identifier, and so that
-CPU and database activity can be accounted for against the request that caused
-them.
-
-See doc/log_contexts.rst for details on how this works.
+"""
+Backwards compatibility re-exports of ``synapse.logging.context`` functionality.
 """
 
-import logging
-import threading
-
-from twisted.internet import defer, threads
-
-logger = logging.getLogger(__name__)
-
-try:
-    import resource
-
-    # Python doesn't ship with a definition of RUSAGE_THREAD but it's defined
-    # to be 1 on linux so we hard code it.
-    RUSAGE_THREAD = 1
-
-    # If the system doesn't support RUSAGE_THREAD then this should throw an
-    # exception.
-    resource.getrusage(RUSAGE_THREAD)
-
-    def get_thread_resource_usage():
-        return resource.getrusage(RUSAGE_THREAD)
-
-
-except Exception:
-    # If the system doesn't support resource.getrusage(RUSAGE_THREAD) then we
-    # won't track resource usage by returning None.
-    def get_thread_resource_usage():
-        return None
-
-
-class ContextResourceUsage(object):
-    """Object for tracking the resources used by a log context
-
-    Attributes:
-        ru_utime (float): user CPU time (in seconds)
-        ru_stime (float): system CPU time (in seconds)
-        db_txn_count (int): number of database transactions done
-        db_sched_duration_sec (float): amount of time spent waiting for a
-            database connection
-        db_txn_duration_sec (float): amount of time spent doing database
-            transactions (excluding scheduling time)
-        evt_db_fetch_count (int): number of events requested from the database
-    """
-
-    __slots__ = [
-        "ru_stime",
-        "ru_utime",
-        "db_txn_count",
-        "db_txn_duration_sec",
-        "db_sched_duration_sec",
-        "evt_db_fetch_count",
-    ]
-
-    def __init__(self, copy_from=None):
-        """Create a new ContextResourceUsage
-
-        Args:
-            copy_from (ContextResourceUsage|None): if not None, an object to
-                copy stats from
-        """
-        if copy_from is None:
-            self.reset()
-        else:
-            self.ru_utime = copy_from.ru_utime
-            self.ru_stime = copy_from.ru_stime
-            self.db_txn_count = copy_from.db_txn_count
-
-            self.db_txn_duration_sec = copy_from.db_txn_duration_sec
-            self.db_sched_duration_sec = copy_from.db_sched_duration_sec
-            self.evt_db_fetch_count = copy_from.evt_db_fetch_count
-
-    def copy(self):
-        return ContextResourceUsage(copy_from=self)
-
-    def reset(self):
-        self.ru_stime = 0.0
-        self.ru_utime = 0.0
-        self.db_txn_count = 0
-
-        self.db_txn_duration_sec = 0
-        self.db_sched_duration_sec = 0
-        self.evt_db_fetch_count = 0
-
-    def __repr__(self):
-        return (
-            "<ContextResourceUsage ru_stime='%r', ru_utime='%r', "
-            "db_txn_count='%r', db_txn_duration_sec='%r', "
-            "db_sched_duration_sec='%r', evt_db_fetch_count='%r'>"
-        ) % (
-            self.ru_stime,
-            self.ru_utime,
-            self.db_txn_count,
-            self.db_txn_duration_sec,
-            self.db_sched_duration_sec,
-            self.evt_db_fetch_count,
-        )
-
-    def __iadd__(self, other):
-        """Add another ContextResourceUsage's stats to this one's.
-
-        Args:
-            other (ContextResourceUsage): the other resource usage object
-        """
-        self.ru_utime += other.ru_utime
-        self.ru_stime += other.ru_stime
-        self.db_txn_count += other.db_txn_count
-        self.db_txn_duration_sec += other.db_txn_duration_sec
-        self.db_sched_duration_sec += other.db_sched_duration_sec
-        self.evt_db_fetch_count += other.evt_db_fetch_count
-        return self
-
-    def __isub__(self, other):
-        self.ru_utime -= other.ru_utime
-        self.ru_stime -= other.ru_stime
-        self.db_txn_count -= other.db_txn_count
-        self.db_txn_duration_sec -= other.db_txn_duration_sec
-        self.db_sched_duration_sec -= other.db_sched_duration_sec
-        self.evt_db_fetch_count -= other.evt_db_fetch_count
-        return self
-
-    def __add__(self, other):
-        res = ContextResourceUsage(copy_from=self)
-        res += other
-        return res
-
-    def __sub__(self, other):
-        res = ContextResourceUsage(copy_from=self)
-        res -= other
-        return res
-
-
-class LoggingContext(object):
-    """Additional context for log formatting. Contexts are scoped within a
-    "with" block.
-
-    If a parent is given when creating a new context, then:
-        - logging fields are copied from the parent to the new context on entry
-        - when the new context exits, the cpu usage stats are copied from the
-          child to the parent
-
-    Args:
-        name (str): Name for the context for debugging.
-        parent_context (LoggingContext|None): The parent of the new context
-    """
-
-    __slots__ = [
-        "previous_context",
-        "name",
-        "parent_context",
-        "_resource_usage",
-        "usage_start",
-        "main_thread",
-        "alive",
-        "request",
-        "tag",
-    ]
-
-    thread_local = threading.local()
-
-    class Sentinel(object):
-        """Sentinel to represent the root context"""
-
-        __slots__ = []
-
-        def __str__(self):
-            return "sentinel"
-
-        def copy_to(self, record):
-            pass
-
-        def start(self):
-            pass
-
-        def stop(self):
-            pass
-
-        def add_database_transaction(self, duration_sec):
-            pass
-
-        def add_database_scheduled(self, sched_sec):
-            pass
-
-        def record_event_fetch(self, event_count):
-            pass
-
-        def __nonzero__(self):
-            return False
-
-        __bool__ = __nonzero__  # python3
-
-    sentinel = Sentinel()
-
-    def __init__(self, name=None, parent_context=None, request=None):
-        self.previous_context = LoggingContext.current_context()
-        self.name = name
-
-        # track the resources used by this context so far
-        self._resource_usage = ContextResourceUsage()
-
-        # If alive has the thread resource usage when the logcontext last
-        # became active.
-        self.usage_start = None
-
-        self.main_thread = threading.current_thread()
-        self.request = None
-        self.tag = ""
-        self.alive = True
-
-        self.parent_context = parent_context
-
-        if self.parent_context is not None:
-            self.parent_context.copy_to(self)
-
-        if request is not None:
-            # the request param overrides the request from the parent context
-            self.request = request
-
-    def __str__(self):
-        if self.request:
-            return str(self.request)
-        return "%s@%x" % (self.name, id(self))
-
-    @classmethod
-    def current_context(cls):
-        """Get the current logging context from thread local storage
-
-        Returns:
-            LoggingContext: the current logging context
-        """
-        return getattr(cls.thread_local, "current_context", cls.sentinel)
-
-    @classmethod
-    def set_current_context(cls, context):
-        """Set the current logging context in thread local storage
-        Args:
-            context(LoggingContext): The context to activate.
-        Returns:
-            The context that was previously active
-        """
-        current = cls.current_context()
-
-        if current is not context:
-            current.stop()
-            cls.thread_local.current_context = context
-            context.start()
-        return current
-
-    def __enter__(self):
-        """Enters this logging context into thread local storage"""
-        old_context = self.set_current_context(self)
-        if self.previous_context != old_context:
-            logger.warn(
-                "Expected previous context %r, found %r",
-                self.previous_context,
-                old_context,
-            )
-        self.alive = True
-
-        return self
-
-    def __exit__(self, type, value, traceback):
-        """Restore the logging context in thread local storage to the state it
-        was before this context was entered.
-        Returns:
-            None to avoid suppressing any exceptions that were thrown.
-        """
-        current = self.set_current_context(self.previous_context)
-        if current is not self:
-            if current is self.sentinel:
-                logger.warning("Expected logging context %s was lost", self)
-            else:
-                logger.warning(
-                    "Expected logging context %s but found %s", self, current
-                )
-        self.previous_context = None
-        self.alive = False
-
-        # if we have a parent, pass our CPU usage stats on
-        if self.parent_context is not None and hasattr(
-            self.parent_context, "_resource_usage"
-        ):
-            self.parent_context._resource_usage += self._resource_usage
-
-            # reset them in case we get entered again
-            self._resource_usage.reset()
-
-    def copy_to(self, record):
-        """Copy logging fields from this context to a log record or
-        another LoggingContext
-        """
-
-        # 'request' is the only field we currently use in the logger, so that's
-        # all we need to copy
-        record.request = self.request
-
-    def start(self):
-        if threading.current_thread() is not self.main_thread:
-            logger.warning("Started logcontext %s on different thread", self)
-            return
-
-        # If we haven't already started record the thread resource usage so
-        # far
-        if not self.usage_start:
-            self.usage_start = get_thread_resource_usage()
-
-    def stop(self):
-        if threading.current_thread() is not self.main_thread:
-            logger.warning("Stopped logcontext %s on different thread", self)
-            return
-
-        # When we stop, let's record the cpu used since we started
-        if not self.usage_start:
-            logger.warning("Called stop on logcontext %s without calling start", self)
-            return
-
-        utime_delta, stime_delta = self._get_cputime()
-        self._resource_usage.ru_utime += utime_delta
-        self._resource_usage.ru_stime += stime_delta
-
-        self.usage_start = None
-
-    def get_resource_usage(self):
-        """Get resources used by this logcontext so far.
-
-        Returns:
-            ContextResourceUsage: a *copy* of the object tracking resource
-                usage so far
-        """
-        # we always return a copy, for consistency
-        res = self._resource_usage.copy()
-
-        # If we are on the correct thread and we're currently running then we
-        # can include resource usage so far.
-        is_main_thread = threading.current_thread() is self.main_thread
-        if self.alive and self.usage_start and is_main_thread:
-            utime_delta, stime_delta = self._get_cputime()
-            res.ru_utime += utime_delta
-            res.ru_stime += stime_delta
-
-        return res
-
-    def _get_cputime(self):
-        """Get the cpu usage time so far
-
-        Returns: Tuple[float, float]: seconds in user mode, seconds in system mode
-        """
-        current = get_thread_resource_usage()
-
-        utime_delta = current.ru_utime - self.usage_start.ru_utime
-        stime_delta = current.ru_stime - self.usage_start.ru_stime
-
-        # sanity check
-        if utime_delta < 0:
-            logger.error(
-                "utime went backwards! %f < %f",
-                current.ru_utime,
-                self.usage_start.ru_utime,
-            )
-            utime_delta = 0
-
-        if stime_delta < 0:
-            logger.error(
-                "stime went backwards! %f < %f",
-                current.ru_stime,
-                self.usage_start.ru_stime,
-            )
-            stime_delta = 0
-
-        return utime_delta, stime_delta
-
-    def add_database_transaction(self, duration_sec):
-        if duration_sec < 0:
-            raise ValueError("DB txn time can only be non-negative")
-        self._resource_usage.db_txn_count += 1
-        self._resource_usage.db_txn_duration_sec += duration_sec
-
-    def add_database_scheduled(self, sched_sec):
-        """Record a use of the database pool
-
-        Args:
-            sched_sec (float): number of seconds it took us to get a
-                connection
-        """
-        if sched_sec < 0:
-            raise ValueError("DB scheduling time can only be non-negative")
-        self._resource_usage.db_sched_duration_sec += sched_sec
-
-    def record_event_fetch(self, event_count):
-        """Record a number of events being fetched from the db
-
-        Args:
-            event_count (int): number of events being fetched
-        """
-        self._resource_usage.evt_db_fetch_count += event_count
-
-
-class LoggingContextFilter(logging.Filter):
-    """Logging filter that adds values from the current logging context to each
-    record.
-    Args:
-        **defaults: Default values to avoid formatters complaining about
-            missing fields
-    """
-
-    def __init__(self, **defaults):
-        self.defaults = defaults
-
-    def filter(self, record):
-        """Add each fields from the logging contexts to the record.
-        Returns:
-            True to include the record in the log output.
-        """
-        context = LoggingContext.current_context()
-        for key, value in self.defaults.items():
-            setattr(record, key, value)
-
-        # context should never be None, but if it somehow ends up being, then
-        # we end up in a death spiral of infinite loops, so let's check, for
-        # robustness' sake.
-        if context is not None:
-            context.copy_to(record)
-
-        return True
-
-
-class PreserveLoggingContext(object):
-    """Captures the current logging context and restores it when the scope is
-    exited. Used to restore the context after a function using
-    @defer.inlineCallbacks is resumed by a callback from the reactor."""
-
-    __slots__ = ["current_context", "new_context", "has_parent"]
-
-    def __init__(self, new_context=None):
-        if new_context is None:
-            new_context = LoggingContext.sentinel
-        self.new_context = new_context
-
-    def __enter__(self):
-        """Captures the current logging context"""
-        self.current_context = LoggingContext.set_current_context(self.new_context)
-
-        if self.current_context:
-            self.has_parent = self.current_context.previous_context is not None
-            if not self.current_context.alive:
-                logger.debug("Entering dead context: %s", self.current_context)
-
-    def __exit__(self, type, value, traceback):
-        """Restores the current logging context"""
-        context = LoggingContext.set_current_context(self.current_context)
-
-        if context != self.new_context:
-            if context is LoggingContext.sentinel:
-                logger.warning("Expected logging context %s was lost", self.new_context)
-            else:
-                logger.warning(
-                    "Expected logging context %s but found %s",
-                    self.new_context,
-                    context,
-                )
-
-        if self.current_context is not LoggingContext.sentinel:
-            if not self.current_context.alive:
-                logger.debug("Restoring dead context: %s", self.current_context)
-
-
-def nested_logging_context(suffix, parent_context=None):
-    """Creates a new logging context as a child of another.
-
-    The nested logging context will have a 'request' made up of the parent context's
-    request, plus the given suffix.
-
-    CPU/db usage stats will be added to the parent context's on exit.
-
-    Normal usage looks like:
-
-        with nested_logging_context(suffix):
-            # ... do stuff
-
-    Args:
-        suffix (str): suffix to add to the parent context's 'request'.
-        parent_context (LoggingContext|None): parent context. Will use the current context
-            if None.
-
-    Returns:
-        LoggingContext: new logging context.
-    """
-    if parent_context is None:
-        parent_context = LoggingContext.current_context()
-    return LoggingContext(
-        parent_context=parent_context, request=parent_context.request + "-" + suffix
-    )
-
-
-def preserve_fn(f):
-    """Function decorator which wraps the function with run_in_background"""
-
-    def g(*args, **kwargs):
-        return run_in_background(f, *args, **kwargs)
-
-    return g
-
-
-def run_in_background(f, *args, **kwargs):
-    """Calls a function, ensuring that the current context is restored after
-    return from the function, and that the sentinel context is set once the
-    deferred returned by the function completes.
-
-    Useful for wrapping functions that return a deferred which you don't yield
-    on (for instance because you want to pass it to deferred.gatherResults()).
-
-    Note that if you completely discard the result, you should make sure that
-    `f` doesn't raise any deferred exceptions, otherwise a scary-looking
-    CRITICAL error about an unhandled error will be logged without much
-    indication about where it came from.
-    """
-    current = LoggingContext.current_context()
-    try:
-        res = f(*args, **kwargs)
-    except:  # noqa: E722
-        # the assumption here is that the caller doesn't want to be disturbed
-        # by synchronous exceptions, so let's turn them into Failures.
-        return defer.fail()
-
-    if not isinstance(res, defer.Deferred):
-        return res
-
-    if res.called and not res.paused:
-        # The function should have maintained the logcontext, so we can
-        # optimise out the messing about
-        return res
-
-    # The function may have reset the context before returning, so
-    # we need to restore it now.
-    ctx = LoggingContext.set_current_context(current)
-
-    # The original context will be restored when the deferred
-    # completes, but there is nothing waiting for it, so it will
-    # get leaked into the reactor or some other function which
-    # wasn't expecting it. We therefore need to reset the context
-    # here.
-    #
-    # (If this feels asymmetric, consider it this way: we are
-    # effectively forking a new thread of execution. We are
-    # probably currently within a ``with LoggingContext()`` block,
-    # which is supposed to have a single entry and exit point. But
-    # by spawning off another deferred, we are effectively
-    # adding a new exit point.)
-    res.addBoth(_set_context_cb, ctx)
-    return res
-
-
-def make_deferred_yieldable(deferred):
-    """Given a deferred, make it follow the Synapse logcontext rules:
-
-    If the deferred has completed (or is not actually a Deferred), essentially
-    does nothing (just returns another completed deferred with the
-    result/failure).
-
-    If the deferred has not yet completed, resets the logcontext before
-    returning a deferred. Then, when the deferred completes, restores the
-    current logcontext before running callbacks/errbacks.
-
-    (This is more-or-less the opposite operation to run_in_background.)
-    """
-    if not isinstance(deferred, defer.Deferred):
-        return deferred
-
-    if deferred.called and not deferred.paused:
-        # it looks like this deferred is ready to run any callbacks we give it
-        # immediately. We may as well optimise out the logcontext faffery.
-        return deferred
-
-    # ok, we can't be sure that a yield won't block, so let's reset the
-    # logcontext, and add a callback to the deferred to restore it.
-    prev_context = LoggingContext.set_current_context(LoggingContext.sentinel)
-    deferred.addBoth(_set_context_cb, prev_context)
-    return deferred
-
-
-def _set_context_cb(result, context):
-    """A callback function which just sets the logging context"""
-    LoggingContext.set_current_context(context)
-    return result
-
-
-def defer_to_thread(reactor, f, *args, **kwargs):
-    """
-    Calls the function `f` using a thread from the reactor's default threadpool and
-    returns the result as a Deferred.
-
-    Creates a new logcontext for `f`, which is created as a child of the current
-    logcontext (so its CPU usage metrics will get attributed to the current
-    logcontext). `f` should preserve the logcontext it is given.
-
-    The result deferred follows the Synapse logcontext rules: you should `yield`
-    on it.
-
-    Args:
-        reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
-            the Deferred will be invoked, and whose threadpool we should use for the
-            function.
-
-            Normally this will be hs.get_reactor().
-
-        f (callable): The function to call.
-
-        args: positional arguments to pass to f.
-
-        kwargs: keyword arguments to pass to f.
-
-    Returns:
-        Deferred: A Deferred which fires a callback with the result of `f`, or an
-            errback if `f` throws an exception.
-    """
-    return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs)
-
-
-def defer_to_threadpool(reactor, threadpool, f, *args, **kwargs):
-    """
-    A wrapper for twisted.internet.threads.deferToThreadpool, which handles
-    logcontexts correctly.
-
-    Calls the function `f` using a thread from the given threadpool and returns
-    the result as a Deferred.
-
-    Creates a new logcontext for `f`, which is created as a child of the current
-    logcontext (so its CPU usage metrics will get attributed to the current
-    logcontext). `f` should preserve the logcontext it is given.
-
-    The result deferred follows the Synapse logcontext rules: you should `yield`
-    on it.
-
-    Args:
-        reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
-            the Deferred will be invoked. Normally this will be hs.get_reactor().
-
-        threadpool (twisted.python.threadpool.ThreadPool): The threadpool to use for
-            running `f`. Normally this will be hs.get_reactor().getThreadPool().
-
-        f (callable): The function to call.
-
-        args: positional arguments to pass to f.
-
-        kwargs: keyword arguments to pass to f.
-
-    Returns:
-        Deferred: A Deferred which fires a callback with the result of `f`, or an
-            errback if `f` throws an exception.
-    """
-    logcontext = LoggingContext.current_context()
-
-    def g():
-        with LoggingContext(parent_context=logcontext):
-            return f(*args, **kwargs)
-
-    return make_deferred_yieldable(threads.deferToThreadPool(reactor, threadpool, g))
+from synapse.logging.context import (
+    LoggingContext,
+    LoggingContextFilter,
+    PreserveLoggingContext,
+    defer_to_thread,
+    make_deferred_yieldable,
+    nested_logging_context,
+    preserve_fn,
+    run_in_background,
+)
+
+__all__ = [
+    "defer_to_thread",
+    "LoggingContext",
+    "LoggingContextFilter",
+    "make_deferred_yieldable",
+    "nested_logging_context",
+    "preserve_fn",
+    "PreserveLoggingContext",
+    "run_in_background",
+]
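
With the module reduced to this re-export shim, legacy imports resolve to the very same objects as the new synapse.logging.context path, and the run_in_background/make_deferred_yieldable pairing described in the removed docstrings is unchanged. A minimal sketch (do_work is illustrative):

    from twisted.internet import defer

    import synapse.logging.context as canonical
    import synapse.util.logcontext as legacy

    # the shim re-exports the same objects, so old imports keep working
    assert legacy.LoggingContext is canonical.LoggingContext
    assert legacy.run_in_background is canonical.run_in_background

    def do_work():
        ...

    @defer.inlineCallbacks
    def caller():
        # fork the work off under the current logcontext, then reset the
        # context while waiting and restore it once the result arrives
        result = yield canonical.make_deferred_yieldable(
            canonical.run_in_background(do_work)
        )
        defer.returnValue(result)
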
diff --git a/synapse/util/logformatter.py b/synapse/util/logformatter.py
index fbf570c756..320e8f8174 100644
--- a/synapse/util/logformatter.py
+++ b/synapse/util/logformatter.py
@@ -1,5 +1,4 @@
-# -*- coding: utf-8 -*-
-# Copyright 2017 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,41 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+"""
+Backwards compatibility re-exports of ``synapse.logging.formatter`` functionality.
+"""
 
-import logging
-import traceback
+from synapse.logging.formatter import LogFormatter
 
-from six import StringIO
-
-
-class LogFormatter(logging.Formatter):
-    """Log formatter which gives more detail for exceptions
-
-    This is the same as the standard log formatter, except that when logging
-    exceptions [typically via log.foo("msg", exc_info=1)], it prints the
-    sequence that led up to the point at which the exception was caught.
-    (Normally only stack frames between the point the exception was raised and
-    where it was caught are logged).
-    """
-
-    def __init__(self, *args, **kwargs):
-        super(LogFormatter, self).__init__(*args, **kwargs)
-
-    def formatException(self, ei):
-        sio = StringIO()
-        (typ, val, tb) = ei
-
-        # log the stack above the exception capture point if possible, but
-        # check that we actually have an f_back attribute to work around
-        # https://twistedmatrix.com/trac/ticket/9305
-
-        if tb and hasattr(tb.tb_frame, "f_back"):
-            sio.write("Capture point (most recent call last):\n")
-            traceback.print_stack(tb.tb_frame.f_back, None, sio)
-
-        traceback.print_exception(typ, val, tb, None, sio)
-        s = sio.getvalue()
-        sio.close()
-        if s[-1:] == "\n":
-            s = s[:-1]
-        return s
+__all__ = ["LogFormatter"]
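
The formatter itself is unchanged by the move, so wiring it into the stdlib logging machinery works as before. A sketch using the new canonical import path:

    import logging

    from synapse.logging.formatter import LogFormatter

    handler = logging.StreamHandler()
    # same API as logging.Formatter, except that formatException() also
    # prints the stack above the point where the exception was caught
    handler.setFormatter(LogFormatter("%(asctime)s - %(name)s - %(message)s"))
    logging.getLogger("synapse").addHandler(handler)
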
diff --git a/synapse/util/logutils.py b/synapse/util/logutils.py
deleted file mode 100644
index 7df0fa6087..0000000000
--- a/synapse/util/logutils.py
+++ /dev/null
@@ -1,194 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import inspect
-import logging
-import time
-from functools import wraps
-from inspect import getcallargs
-
-from six import PY3
-
-_TIME_FUNC_ID = 0
-
-
-def _log_debug_as_f(f, msg, msg_args):
-    name = f.__module__
-    logger = logging.getLogger(name)
-
-    if logger.isEnabledFor(logging.DEBUG):
-        if PY3:
-            lineno = f.__code__.co_firstlineno
-            pathname = f.__code__.co_filename
-        else:
-            lineno = f.func_code.co_firstlineno
-            pathname = f.func_code.co_filename
-
-        record = logging.LogRecord(
-            name=name,
-            level=logging.DEBUG,
-            pathname=pathname,
-            lineno=lineno,
-            msg=msg,
-            args=msg_args,
-            exc_info=None,
-        )
-
-        logger.handle(record)
-
-
-def log_function(f):
-    """ Function decorator that logs every call to that function.
-    """
-    func_name = f.__name__
-
-    @wraps(f)
-    def wrapped(*args, **kwargs):
-        name = f.__module__
-        logger = logging.getLogger(name)
-        level = logging.DEBUG
-
-        if logger.isEnabledFor(level):
-            bound_args = getcallargs(f, *args, **kwargs)
-
-            def format(value):
-                r = str(value)
-                if len(r) > 50:
-                    r = r[:50] + "..."
-                return r
-
-            func_args = ["%s=%s" % (k, format(v)) for k, v in bound_args.items()]
-
-            msg_args = {"func_name": func_name, "args": ", ".join(func_args)}
-
-            _log_debug_as_f(f, "Invoked '%(func_name)s' with args: %(args)s", msg_args)
-
-        return f(*args, **kwargs)
-
-    wrapped.__name__ = func_name
-    return wrapped
-
-
-def time_function(f):
-    func_name = f.__name__
-
-    @wraps(f)
-    def wrapped(*args, **kwargs):
-        global _TIME_FUNC_ID
-        id = _TIME_FUNC_ID
-        _TIME_FUNC_ID += 1
-
-        start = time.clock()
-
-        try:
-            _log_debug_as_f(f, "[FUNC START] {%s-%d}", (func_name, id))
-
-            r = f(*args, **kwargs)
-        finally:
-            end = time.clock()
-            _log_debug_as_f(
-                f, "[FUNC END] {%s-%d} %.3f sec", (func_name, id, end - start)
-            )
-
-        return r
-
-    return wrapped
-
-
-def trace_function(f):
-    func_name = f.__name__
-    linenum = f.func_code.co_firstlineno
-    pathname = f.func_code.co_filename
-
-    @wraps(f)
-    def wrapped(*args, **kwargs):
-        name = f.__module__
-        logger = logging.getLogger(name)
-        level = logging.DEBUG
-
-        s = inspect.currentframe().f_back
-
-        to_print = [
-            "\t%s:%s %s. Args: args=%s, kwargs=%s"
-            % (pathname, linenum, func_name, args, kwargs)
-        ]
-        while s:
-            if True or s.f_globals["__name__"].startswith("synapse"):
-                filename, lineno, function, _, _ = inspect.getframeinfo(s)
-                args_string = inspect.formatargvalues(*inspect.getargvalues(s))
-
-                to_print.append(
-                    "\t%s:%d %s. Args: %s" % (filename, lineno, function, args_string)
-                )
-
-            s = s.f_back
-
-        msg = "\nTraceback for %s:\n" % (func_name,) + "\n".join(to_print)
-
-        record = logging.LogRecord(
-            name=name,
-            level=level,
-            pathname=pathname,
-            lineno=lineno,
-            msg=msg,
-            args=None,
-            exc_info=None,
-        )
-
-        logger.handle(record)
-
-        return f(*args, **kwargs)
-
-    wrapped.__name__ = func_name
-    return wrapped
-
-
-def get_previous_frames():
-    s = inspect.currentframe().f_back.f_back
-    to_return = []
-    while s:
-        if s.f_globals["__name__"].startswith("synapse"):
-            filename, lineno, function, _, _ = inspect.getframeinfo(s)
-            args_string = inspect.formatargvalues(*inspect.getargvalues(s))
-
-            to_return.append(
-                "{{  %s:%d %s - Args: %s }}" % (filename, lineno, function, args_string)
-            )
-
-        s = s.f_back
-
-    return ", ".join(to_return)
-
-
-def get_previous_frame(ignore=[]):
-    s = inspect.currentframe().f_back.f_back
-
-    while s:
-        if s.f_globals["__name__"].startswith("synapse"):
-            if not any(s.f_globals["__name__"].startswith(ig) for ig in ignore):
-                filename, lineno, function, _, _ = inspect.getframeinfo(s)
-                args_string = inspect.formatargvalues(*inspect.getargvalues(s))
-
-                return "{{  %s:%d %s - Args: %s }}" % (
-                    filename,
-                    lineno,
-                    function,
-                    args_string,
-                )
-
-        s = s.f_back
-
-    return None
diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py
index 01284d3cf8..c30b6de19c 100644
--- a/synapse/util/metrics.py
+++ b/synapse/util/metrics.py
@@ -20,8 +20,8 @@ from prometheus_client import Counter
 
 from twisted.internet import defer
 
+from synapse.logging.context import LoggingContext
 from synapse.metrics import InFlightGauge
-from synapse.util.logcontext import LoggingContext
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py
index 06defa8199..5ca4521ce3 100644
--- a/synapse/util/ratelimitutils.py
+++ b/synapse/util/ratelimitutils.py
@@ -20,7 +20,7 @@ import logging
 from twisted.internet import defer
 
 from synapse.api.errors import LimitExceededError
-from synapse.util.logcontext import (
+from synapse.logging.context import (
     PreserveLoggingContext,
     make_deferred_yieldable,
     run_in_background,
@@ -36,9 +36,11 @@ class FederationRateLimiter(object):
             clock (Clock)
             config (FederationRateLimitConfig)
         """
-        self.clock = clock
-        self._config = config
-        self.ratelimiters = {}
+
+        def new_limiter():
+            return _PerHostRatelimiter(clock=clock, config=config)
+
+        self.ratelimiters = collections.defaultdict(new_limiter)
 
     def ratelimit(self, host):
         """Used to ratelimit an incoming request from given host
@@ -53,11 +55,9 @@ class FederationRateLimiter(object):
             host (str): Origin of incoming request.
 
         Returns:
-            _PerHostRatelimiter
+            context manager which returns a deferred.
         """
-        return self.ratelimiters.setdefault(
-            host, _PerHostRatelimiter(clock=self.clock, config=self._config)
-        ).ratelimit()
+        return self.ratelimiters[host].ratelimit()
 
 
 class _PerHostRatelimiter(object):
@@ -122,7 +122,7 @@ class _PerHostRatelimiter(object):
         self.request_times.append(time_now)
 
         def queue_request():
-            if len(self.current_processing) > self.concurrent_requests:
+            if len(self.current_processing) >= self.concurrent_requests:
                 queue_defer = defer.Deferred()
                 self.ready_request_queue[request_id] = queue_defer
                 logger.info(
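
ratelimit() now documents its return value: a context manager which hands back a deferred to wait on. A hedged sketch of the consumer side (the handler and its names are illustrative):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def on_incoming_request(rate_limiter, origin):
        # the deferred fires once `origin` may proceed; requests beyond the
        # concurrency limit queue up inside the per-host limiter
        with rate_limiter.ratelimit(origin) as wait_deferred:
            yield wait_deferred
            # ... handle the request while holding a processing slot ...
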
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py
index 1a77456498..d8d0ceae51 100644
--- a/synapse/util/retryutils.py
+++ b/synapse/util/retryutils.py
@@ -17,7 +17,7 @@ import random
 
 from twisted.internet import defer
 
-import synapse.util.logcontext
+import synapse.logging.context
 from synapse.api.errors import CodeMessageException
 
 logger = logging.getLogger(__name__)
@@ -225,4 +225,4 @@ class RetryDestinationLimiter(object):
                 logger.exception("Failed to store destination_retry_timings")
 
         # we deliberately do this in the background.
-        synapse.util.logcontext.run_in_background(store_retry_timings)
+        synapse.logging.context.run_in_background(store_retry_timings)
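
The retry timings are deliberately stored fire-and-forget here. As the removed run_in_background docstring warns, a discarded deferred that fails produces a scary unhandled-error log, so callers who drop the result may want an explicit errback. A sketch with illustrative names:

    import logging

    from twisted.internet import defer

    from synapse.logging.context import run_in_background

    logger = logging.getLogger(__name__)

    def store_retry_timings():
        # stand-in for a database write; returns a Deferred
        return defer.succeed(None)

    # nothing yields on this deferred, so handle failures explicitly
    # rather than letting them surface as CRITICAL unhandled errors
    d = run_in_background(store_retry_timings)
    d.addErrback(lambda f: logger.warning("failed to store timings: %s", f))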