diff --git a/synapse/util/async.py b/synapse/util/async_helpers.py
index 9dd4e6b5bc..9b3f2f4b96 100644
--- a/synapse/util/async.py
+++ b/synapse/util/async_helpers.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,42 +13,27 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import collections
+import logging
+from contextlib import contextmanager
+from six.moves import range
-from twisted.internet import defer, reactor
+from twisted.internet import defer
from twisted.internet.defer import CancelledError
from twisted.python import failure
+from synapse.util import Clock, logcontext, unwrapFirstError
+
from .logcontext import (
- PreserveLoggingContext, make_deferred_yieldable, run_in_background
+ PreserveLoggingContext,
+ make_deferred_yieldable,
+ run_in_background,
)
-from synapse.util import logcontext, unwrapFirstError
-
-from contextlib import contextmanager
-
-import logging
-
-from six.moves import range
logger = logging.getLogger(__name__)
-@defer.inlineCallbacks
-def sleep(seconds):
- d = defer.Deferred()
- with PreserveLoggingContext():
- reactor.callLater(seconds, d.callback, seconds)
- res = yield d
- defer.returnValue(res)
-
-
-def run_on_reactor():
- """ This will cause the rest of the function to be invoked upon the next
- iteration of the main loop
- """
- return sleep(0)
-
-
class ObservableDeferred(object):
"""Wraps a deferred object so that we can add observer deferreds. These
observer deferreds do not affect the callback chain of the original
@@ -171,86 +157,8 @@ def concurrently_execute(func, args, limit):
class Linearizer(object):
- """Linearizes access to resources based on a key. Useful to ensure only one
- thing is happening at a time on a given resource.
-
- Example:
-
- with (yield linearizer.queue("test_key")):
- # do some work.
-
- """
- def __init__(self, name=None):
- if name is None:
- self.name = id(self)
- else:
- self.name = name
- self.key_to_defer = {}
-
- @defer.inlineCallbacks
- def queue(self, key):
- # If there is already a deferred in the queue, we pull it out so that
- # we can wait on it later.
- # Then we replace it with a deferred that we resolve *after* the
- # context manager has exited.
- # We only return the context manager after the previous deferred has
- # resolved.
- # This all has the net effect of creating a chain of deferreds that
- # wait for the previous deferred before starting their work.
- current_defer = self.key_to_defer.get(key)
-
- new_defer = defer.Deferred()
- self.key_to_defer[key] = new_defer
-
- if current_defer:
- logger.info(
- "Waiting to acquire linearizer lock %r for key %r", self.name, key
- )
- try:
- with PreserveLoggingContext():
- yield current_defer
- except Exception:
- logger.exception("Unexpected exception in Linearizer")
-
- logger.info("Acquired linearizer lock %r for key %r", self.name,
- key)
-
- # if the code holding the lock completes synchronously, then it
- # will recursively run the next claimant on the list. That can
- # relatively rapidly lead to stack exhaustion. This is essentially
- # the same problem as http://twistedmatrix.com/trac/ticket/9304.
- #
- # In order to break the cycle, we add a cheeky sleep(0) here to
- # ensure that we fall back to the reactor between each iteration.
- #
- # (There's no particular need for it to happen before we return
- # the context manager, but it needs to happen while we hold the
- # lock, and the context manager's exit code must be synchronous,
- # so actually this is the only sensible place.
- yield run_on_reactor()
-
- else:
- logger.info("Acquired uncontended linearizer lock %r for key %r",
- self.name, key)
-
- @contextmanager
- def _ctx_manager():
- try:
- yield
- finally:
- logger.info("Releasing linearizer lock %r for key %r", self.name, key)
- with PreserveLoggingContext():
- new_defer.callback(None)
- current_d = self.key_to_defer.get(key)
- if current_d is new_defer:
- self.key_to_defer.pop(key, None)
-
- defer.returnValue(_ctx_manager())
-
-
-class Limiter(object):
"""Limits concurrent access to resources based on a key. Useful to ensure
- only a few thing happen at a time on a given resource.
+ only a few things happen at a time on a given resource.
Example:
@@ -258,54 +166,65 @@ class Limiter(object):
# do some work.
"""
- def __init__(self, max_count):
+ def __init__(self, name=None, max_count=1, clock=None):
"""
Args:
- max_count(int): The maximum number of concurrent access
+ max_count(int): The maximum number of concurrent accesses
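+            name(str|int): A name for this linearizer, used in log messages.
+                Defaults to id(self) if not given.
+            clock(synapse.util.Clock): Used for the sleep(0) which yields to
+                the reactor between successive lock claimants. Defaults to a
+                Clock wrapping the global reactor.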
"""
+ if name is None:
+ self.name = id(self)
+ else:
+ self.name = name
+
+ if not clock:
+ from twisted.internet import reactor
+ clock = Clock(reactor)
+ self._clock = clock
self.max_count = max_count
# key_to_defer is a map from the key to a 2 element list where
- # the first element is the number of things executing
- # the second element is a list of deferreds for the things blocked from
- # executing.
+ # the first element is the number of things executing, and
+ # the second element is an OrderedDict, where the keys are deferreds for the
+ # things blocked from executing.
self.key_to_defer = {}
- @defer.inlineCallbacks
def queue(self, key):
- entry = self.key_to_defer.setdefault(key, [0, []])
+ # we avoid doing defer.inlineCallbacks here, so that cancellation works correctly.
+ # (https://twistedmatrix.com/trac/ticket/4632 meant that cancellations were not
+ # propagated inside inlineCallbacks until Twisted 18.7)
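+        # Instead, queue() returns a plain Deferred which, once the lock has
+        # been acquired, fires with the context manager that releases it.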
+ entry = self.key_to_defer.setdefault(key, [0, collections.OrderedDict()])
# If the number of things executing is greater than the maximum
# then add a deferred to the list of blocked items
- # When on of the things currently executing finishes it will callback
+ # When one of the things currently executing finishes it will callback
# this item so that it can continue executing.
if entry[0] >= self.max_count:
- new_defer = defer.Deferred()
- entry[1].append(new_defer)
-
- logger.info("Waiting to acquire limiter lock for key %r", key)
- with PreserveLoggingContext():
- yield new_defer
- logger.info("Acquired limiter lock for key %r", key)
+ res = self._await_lock(key)
else:
- logger.info("Acquired uncontended limiter lock for key %r", key)
+ logger.info(
+ "Acquired uncontended linearizer lock %r for key %r", self.name, key,
+ )
+ entry[0] += 1
+ res = defer.succeed(None)
- entry[0] += 1
+        # once we successfully get the lock, we need to return a context manager which
+        # will release the lock.
@contextmanager
- def _ctx_manager():
+ def _ctx_manager(_):
try:
yield
finally:
- logger.info("Releasing limiter lock for key %r", key)
+ logger.info("Releasing linearizer lock %r for key %r", self.name, key)
# We've finished executing so check if there are any things
# blocked waiting to execute and start one of them
entry[0] -= 1
if entry[1]:
- next_def = entry[1].pop(0)
+ (next_def, _) = entry[1].popitem(last=False)
+ # we need to run the next thing in the sentinel context.
with PreserveLoggingContext():
next_def.callback(None)
elif entry[0] == 0:
@@ -313,7 +232,64 @@ class Limiter(object):
# map.
del self.key_to_defer[key]
- defer.returnValue(_ctx_manager())
+        res.addCallback(_ctx_manager)
+        return res
+
+ def _await_lock(self, key):
+ """Helper for queue: adds a deferred to the queue
+
+ Assumes that we've already checked that we've reached the limit of the number
+        of lock-holders we allow. Creates a new deferred which is added to the queue, and
+ adds some management around cancellations.
+
+ Returns the deferred, which will callback once we have secured the lock.
+
+ """
+ entry = self.key_to_defer[key]
+
+ logger.info(
+ "Waiting to acquire linearizer lock %r for key %r", self.name, key,
+ )
+
+ new_defer = make_deferred_yieldable(defer.Deferred())
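+        # the OrderedDict is used as an ordered set here: the value (1) is
+        # never read. Keeping the deferred as a key lets us remove it again
+        # if the wait is cancelled (see eb below), and lets the releasing
+        # context manager wake the oldest waiter via popitem(last=False).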
+ entry[1][new_defer] = 1
+
+ def cb(_r):
+ logger.info("Acquired linearizer lock %r for key %r", self.name, key)
+ entry[0] += 1
+
+ # if the code holding the lock completes synchronously, then it
+ # will recursively run the next claimant on the list. That can
+ # relatively rapidly lead to stack exhaustion. This is essentially
+ # the same problem as http://twistedmatrix.com/trac/ticket/9304.
+ #
+ # In order to break the cycle, we add a cheeky sleep(0) here to
+ # ensure that we fall back to the reactor between each iteration.
+ #
+ # (This needs to happen while we hold the lock, and the context manager's exit
+ # code must be synchronous, so this is the only sensible place.)
+ return self._clock.sleep(0)
+
+ def eb(e):
+ logger.info("defer %r got err %r", new_defer, e)
+ if isinstance(e, CancelledError):
+ logger.info(
+ "Cancelling wait for linearizer lock %r for key %r",
+ self.name, key,
+ )
+
+ else:
+ logger.warn(
+ "Unexpected exception waiting for linearizer lock %r for key %r",
+ self.name, key,
+ )
+
+ # we just have to take ourselves back out of the queue.
+ del entry[1][new_defer]
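+            # returning the failure means it propagates to whoever was
+            # waiting on the result of queue()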
+ return e
+
+ new_defer.addCallbacks(cb, eb)
+ return new_defer
class ReadWriteLock(object):
@@ -404,7 +380,7 @@ class DeferredTimeoutError(Exception):
"""
-def add_timeout_to_deferred(deferred, timeout, on_timeout_cancel=None):
+def add_timeout_to_deferred(deferred, timeout, reactor, on_timeout_cancel=None):
"""
Add a timeout to a deferred by scheduling it to be cancelled after
timeout seconds.
@@ -419,6 +395,7 @@ def add_timeout_to_deferred(deferred, timeout, on_timeout_cancel=None):
Args:
deferred (defer.Deferred): deferred to be timed out
timeout (Number): seconds to time out after
+ reactor (twisted.internet.reactor): the Twisted reactor to use
on_timeout_cancel (callable): A callable which is called immediately
after the deferred times out, and not if this deferred is
|