diff --git a/CHANGES.rst b/CHANGES.rst
index 40d13c6484..317846d2a2 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,5 +1,26 @@
+Changes in synapse v0.28.1 (2018-05-01)
+=======================================
+
+SECURITY UPDATE
+
+* Clamp the allowed values of event depth received over federation to be
+  [0, 2^63 - 1]. This mitigates an attack where malicious events
+  injected with depth = 2^63 - 1 render rooms unusable. Depth is only used
+  to determine the cosmetic ordering of events within a room, so the
+  ordering of events in an affected room will fall back to stream_ordering
+  rather than depth (topological_ordering).
+
+  This is a temporary measure to mitigate abuse in the wild, whilst a
+  longer-term improvement to how the depth parameter is used is implemented.
+
+ Full details at
+ https://docs.google.com/document/d/1I3fi2S-XnpO45qrpCsowZv8P8dHcNZ4fsBsbOW7KABI
+
+* Pin Twisted to <18.4 until we stop using the private _OpenSSLECCurve API.
+
+
Changes in synapse v0.28.0 (2018-04-26)
-===========================================
+=======================================
Bug Fixes:
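
To make the depth clamp described in the changelog concrete, here is a minimal standalone sketch of the new validation rule (it mirrors the changes to `synapse/api/constants.py` and `synapse/federation/federation_base.py` below, but is not the patched code itself):

```python
MAX_DEPTH = 2**63 - 1  # as added to synapse.api.constants


def validate_inbound_depth(depth):
    """Reject a federation event depth outside [0, MAX_DEPTH]."""
    if not isinstance(depth, int):
        raise ValueError("depth %r is not an integer" % (depth,))
    if depth < 0:
        raise ValueError("depth too small")
    if depth > MAX_DEPTH:
        raise ValueError("depth too large")


validate_inbound_depth(5)          # accepted
# validate_inbound_depth(2**63)    # rejected: cannot be persisted in the db
# validate_inbound_depth(-1)       # rejected: negative
```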
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 4924f44d4e..f31cb9a3cb 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -16,4 +16,4 @@
""" This is a reference implementation of a Matrix home server.
"""
-__version__ = "0.28.0"
+__version__ = "0.28.1"
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 489efb7f86..5baba43966 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -16,6 +16,9 @@
"""Contains constants from the specification."""
+# the "depth" field on events is limited to 2**63 - 1
+MAX_DEPTH = 2**63 - 1
+
class Membership(object):
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index 79eaa31031..4cc98a3fe8 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -14,7 +14,10 @@
# limitations under the License.
import logging
-from synapse.api.errors import SynapseError
+import six
+
+from synapse.api.constants import MAX_DEPTH
+from synapse.api.errors import SynapseError, Codes
from synapse.crypto.event_signing import check_event_content_hash
from synapse.events import FrozenEvent
from synapse.events.utils import prune_event
@@ -190,11 +193,23 @@ def event_from_pdu_json(pdu_json, outlier=False):
FrozenEvent
Raises:
- SynapseError: if the pdu is missing required fields
+ SynapseError: if the pdu is missing required fields or is otherwise
+ not a valid matrix event
"""
# we could probably enforce a bunch of other fields here (room_id, sender,
# origin, etc etc)
- assert_params_in_request(pdu_json, ('event_id', 'type'))
+ assert_params_in_request(pdu_json, ('event_id', 'type', 'depth'))
+
+ depth = pdu_json['depth']
+ if not isinstance(depth, six.integer_types):
+ raise SynapseError(400, "Depth %r not an integer" % (depth, ),
+ Codes.BAD_JSON)
+
+ if depth < 0:
+ raise SynapseError(400, "Depth too small", Codes.BAD_JSON)
+ elif depth > MAX_DEPTH:
+ raise SynapseError(400, "Depth too large", Codes.BAD_JSON)
+
event = FrozenEvent(
pdu_json
)
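
For illustration, a hedged sketch of the caller-visible behaviour of the new checks (the event ID and field values here are invented, and this assumes a checkout where `synapse` is importable):

```python
from synapse.api.errors import SynapseError
from synapse.federation.federation_base import event_from_pdu_json

# a hypothetical PDU with an out-of-range depth
pdu = {
    "event_id": "$illustrative:example.com",
    "type": "m.room.message",
    "depth": -1,
}

try:
    event_from_pdu_json(pdu)
except SynapseError as e:
    print(e.code, e.msg)  # 400 Depth too small
```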
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 23502eda70..b793fc4df7 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -22,7 +22,7 @@ import six
from twisted.internet import defer, reactor
from twisted.python.failure import Failure
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventTypes, Membership, MAX_DEPTH
from synapse.api.errors import AuthError, Codes, SynapseError
from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.events.utils import serialize_event
@@ -625,6 +625,10 @@ class EventCreationHandler(object):
if prev_events_and_hashes:
depth = max([d for _, _, d in prev_events_and_hashes]) + 1
+ # we cap depth of generated events, to ensure that they are not
+ # rejected by other servers (and so that they can be persisted in
+ # the db)
+ depth = min(depth, MAX_DEPTH)
else:
depth = 1
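
A minimal illustration of why the cap matters (MAX_DEPTH as in `synapse.api.constants`; the rest of the handler is elided):

```python
MAX_DEPTH = 2**63 - 1

# depths of the new event's prev_events, as in prev_events_and_hashes
prev_depths = [41, MAX_DEPTH]

# without the cap this would be MAX_DEPTH + 1, which remote servers
# would reject and which could not be persisted in the db
depth = min(max(prev_depths) + 1, MAX_DEPTH)
assert depth == MAX_DEPTH
```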
diff --git a/synapse/metrics/metric.py b/synapse/metrics/metric.py
index 89bd47c3f7..fbba94e633 100644
--- a/synapse/metrics/metric.py
+++ b/synapse/metrics/metric.py
@@ -16,6 +16,7 @@
from itertools import chain
import logging
+import re
logger = logging.getLogger(__name__)
@@ -56,8 +57,7 @@ class BaseMetric(object):
return not len(self.labels)
def _render_labelvalue(self, value):
- # TODO: escape backslashes, quotes and newlines
- return '"%s"' % (value)
+ return '"%s"' % (_escape_label_value(value),)
def _render_key(self, values):
if self.is_scalar():
@@ -299,3 +299,29 @@ class MemoryUsageMetric(object):
"process_psutil_rss:total %d" % sum_rss,
"process_psutil_rss:count %d" % len_rss,
]
+
+
+def _escape_character(m):
+ """Replaces a single character with its escape sequence.
+
+ Args:
+ m (re.MatchObject): A match object whose first group is the single
+ character to replace
+
+ Returns:
+ str
+ """
+ c = m.group(1)
+ if c == "\\":
+ return "\\\\"
+ elif c == "\"":
+ return "\\\""
+ elif c == "\n":
+ return "\\n"
+ return c
+
+
+def _escape_label_value(value):
+ """Takes a label value and escapes quotes, newlines and backslashes
+ """
+ return re.sub(r"([\n\"\\])", _escape_character, value)
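
For context, the Prometheus text exposition format requires backslashes, double quotes and newlines in label values to be escaped. A standalone sketch of the same escaping logic, runnable outside Synapse (the function names mirror the patch, but this is not the patched module itself):

```python
import re


def _escape_character(m):
    c = m.group(1)
    return {"\\": "\\\\", '"': '\\"', "\n": "\\n"}.get(c, c)


def _escape_label_value(value):
    return re.sub(r'([\n"\\])', _escape_character, value)


assert _escape_label_value('a"b\nc\\') == 'a\\"b\\nc\\\\'
print('metric{name="%s"} 1' % _escape_label_value('evil"name\n'))
```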
diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py
index d9c4af9389..c0d2f06855 100644
--- a/synapse/rest/media/v1/_base.py
+++ b/synapse/rest/media/v1/_base.py
@@ -143,6 +143,7 @@ def respond_with_responder(request, responder, media_type, file_size, upload_nam
respond_404(request)
return
+ logger.debug("Responding to media request with responder %s", responder)
add_file_headers(request, media_type, file_size, upload_name)
with responder:
yield responder.write_to_consumer(request)
diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py
index 7f263db239..d23fe10b07 100644
--- a/synapse/rest/media/v1/media_storage.py
+++ b/synapse/rest/media/v1/media_storage.py
@@ -255,7 +255,9 @@ class FileResponder(Responder):
self.open_file = open_file
def write_to_consumer(self, consumer):
- return FileSender().beginFileTransfer(self.open_file, consumer)
+ return make_deferred_yieldable(
+ FileSender().beginFileTransfer(self.open_file, consumer)
+ )
def __exit__(self, exc_type, exc_val, exc_tb):
self.open_file.close()
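
The pattern being applied here, sketched in isolation: `FileSender().beginFileTransfer` returns a deferred which completes from the reactor, outside any logcontext, so wrapping it in `make_deferred_yieldable` lets callers yield it without leaking logcontexts. The stand-in `begin_transfer` below is invented for illustration:

```python
from twisted.internet import defer, reactor

from synapse.util.logcontext import make_deferred_yieldable


def begin_transfer():
    # stands in for FileSender().beginFileTransfer(open_file, consumer):
    # a deferred which completes from the reactor, with no logcontext set
    d = defer.Deferred()
    reactor.callLater(0, d.callback, None)
    return d


@defer.inlineCallbacks
def write_to_consumer():
    # the logcontext is reset while we wait, and restored on completion
    yield make_deferred_yieldable(begin_transfer())
```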
diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py
index 01ac71e53e..e086e12213 100644
--- a/synapse/util/logcontext.py
+++ b/synapse/util/logcontext.py
@@ -302,7 +302,7 @@ def preserve_fn(f):
def run_in_background(f, *args, **kwargs):
"""Calls a function, ensuring that the current context is restored after
return from the function, and that the sentinel context is set once the
- deferred returned by the funtion completes.
+ deferred returned by the function completes.
Useful for wrapping functions that return a deferred which you don't yield
on (for instance because you want to pass it to deferred.gatherResults()).
@@ -320,24 +320,31 @@ def run_in_background(f, *args, **kwargs):
# by synchronous exceptions, so let's turn them into Failures.
return defer.fail()
- if isinstance(res, defer.Deferred) and not res.called:
- # The function will have reset the context before returning, so
- # we need to restore it now.
- LoggingContext.set_current_context(current)
-
- # The original context will be restored when the deferred
- # completes, but there is nothing waiting for it, so it will
- # get leaked into the reactor or some other function which
- # wasn't expecting it. We therefore need to reset the context
- # here.
- #
- # (If this feels asymmetric, consider it this way: we are
- # effectively forking a new thread of execution. We are
- # probably currently within a ``with LoggingContext()`` block,
- # which is supposed to have a single entry and exit point. But
- # by spawning off another deferred, we are effectively
- # adding a new exit point.)
- res.addBoth(_set_context_cb, LoggingContext.sentinel)
+ if not isinstance(res, defer.Deferred):
+ return res
+
+ if res.called and not res.paused:
+ # The function should have maintained the logcontext, so we can
+ # optimise out the messing about
+ return res
+
+ # The function may have reset the context before returning, so
+ # we need to restore it now.
+ ctx = LoggingContext.set_current_context(current)
+
+ # The original context will be restored when the deferred
+ # completes, but there is nothing waiting for it, so it will
+ # get leaked into the reactor or some other function which
+ # wasn't expecting it. We therefore need to reset the context
+ # here.
+ #
+ # (If this feels asymmetric, consider it this way: we are
+ # effectively forking a new thread of execution. We are
+ # probably currently within a ``with LoggingContext()`` block,
+ # which is supposed to have a single entry and exit point. But
+ # by spawning off another deferred, we are effectively
+ # adding a new exit point.)
+ res.addBoth(_set_context_cb, ctx)
return res
@@ -354,9 +361,18 @@ def make_deferred_yieldable(deferred):
(This is more-or-less the opposite operation to run_in_background.)
"""
- if isinstance(deferred, defer.Deferred) and not deferred.called:
- prev_context = LoggingContext.set_current_context(LoggingContext.sentinel)
- deferred.addBoth(_set_context_cb, prev_context)
+ if not isinstance(deferred, defer.Deferred):
+ return deferred
+
+ if deferred.called and not deferred.paused:
+ # it looks like this deferred is ready to run any callbacks we give it
+ # immediately. We may as well optimise out the logcontext faffery.
+ return deferred
+
+ # ok, we can't be sure that a yield won't block, so let's reset the
+ # logcontext, and add a callback to the deferred to restore it.
+ prev_context = LoggingContext.set_current_context(LoggingContext.sentinel)
+ deferred.addBoth(_set_context_cb, prev_context)
return deferred
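
The called-but-paused case which both functions now guard against can be reproduced with a few lines of Twisted: a deferred whose callback returned another, incomplete, deferred reports `called` but will not run newly-added callbacks yet. This is exactly what `_chained_deferred_function` in the new tests exercises:

```python
from twisted.internet import defer, reactor

d = defer.succeed(None)  # already "called"...


def cb(res):
    d2 = defer.Deferred()
    reactor.callLater(0, d2.callback, res)
    return d2  # ...but returning an incomplete deferred pauses the chain


d.addCallback(cb)
print(d.called, bool(d.paused))  # True True: new callbacks won't fire yet
```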
diff --git a/synapse/util/logformatter.py b/synapse/util/logformatter.py
index 59ab3c6968..3e42868ea9 100644
--- a/synapse/util/logformatter.py
+++ b/synapse/util/logformatter.py
@@ -32,7 +32,7 @@ class LogFormatter(logging.Formatter):
super(LogFormatter, self).__init__(*args, **kwargs)
def formatException(self, ei):
- sio = StringIO.StringIO()
+ sio = StringIO()
(typ, val, tb) = ei
# log the stack above the exception capture point if possible, but
diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py
index e5a902f734..9181692771 100644
--- a/tests/appservice/test_scheduler.py
+++ b/tests/appservice/test_scheduler.py
@@ -17,6 +17,8 @@ from synapse.appservice.scheduler import (
_ServiceQueuer, _TransactionController, _Recoverer
)
from twisted.internet import defer
+
+from synapse.util.logcontext import make_deferred_yieldable
from ..utils import MockClock
from mock import Mock
from tests import unittest
@@ -204,7 +206,9 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.TestCase):
def test_send_single_event_with_queue(self):
d = defer.Deferred()
- self.txn_ctrl.send = Mock(return_value=d)
+ self.txn_ctrl.send = Mock(
+ side_effect=lambda x, y: make_deferred_yieldable(d),
+ )
service = Mock(id=4)
event = Mock(event_id="first")
event2 = Mock(event_id="second")
@@ -235,7 +239,10 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.TestCase):
srv_2_event2 = Mock(event_id="srv2b")
send_return_list = [srv_1_defer, srv_2_defer]
- self.txn_ctrl.send = Mock(side_effect=lambda x, y: send_return_list.pop(0))
+
+ def do_send(x, y):
+ return make_deferred_yieldable(send_return_list.pop(0))
+ self.txn_ctrl.send = Mock(side_effect=do_send)
# send events for different ASes and make sure they are sent
self.queuer.enqueue(srv1, srv_1_event)
diff --git a/tests/metrics/test_metric.py b/tests/metrics/test_metric.py
index 39bde6e3f8..069c0be762 100644
--- a/tests/metrics/test_metric.py
+++ b/tests/metrics/test_metric.py
@@ -16,7 +16,8 @@
from tests import unittest
from synapse.metrics.metric import (
- CounterMetric, CallbackMetric, DistributionMetric, CacheMetric
+ CounterMetric, CallbackMetric, DistributionMetric, CacheMetric,
+ _escape_label_value,
)
@@ -171,3 +172,21 @@ class CacheMetricTestCase(unittest.TestCase):
'cache:size{name="cache_name"} 1',
'cache:evicted_size{name="cache_name"} 2',
])
+
+
+class LabelValueEscapeTestCase(unittest.TestCase):
+ def test_simple(self):
+ string = "safjhsdlifhyskljfksdfh"
+ self.assertEqual(string, _escape_label_value(string))
+
+ def test_escape(self):
+ self.assertEqual(
+ "abc\\\"def\\nghi\\\\",
+ _escape_label_value("abc\"def\nghi\\"),
+ )
+
+ def test_sequence_of_escapes(self):
+ self.assertEqual(
+ "abc\\\"def\\nghi\\\\\\n",
+ _escape_label_value("abc\"def\nghi\\\n"),
+ )
diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py
index 575374c6a6..9962ce8a5d 100644
--- a/tests/storage/test_event_push_actions.py
+++ b/tests/storage/test_event_push_actions.py
@@ -128,7 +128,6 @@ class EventPushActionsStoreTestCase(tests.unittest.TestCase):
yield _rotate(10)
yield _assert_counts(1, 1)
- @tests.unittest.DEBUG
@defer.inlineCallbacks
def test_find_first_stream_ordering_after_ts(self):
def add_event(so, ts):
diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py
index 4850722bc5..ad78d884e0 100644
--- a/tests/util/test_logcontext.py
+++ b/tests/util/test_logcontext.py
@@ -36,24 +36,28 @@ class LoggingContextTestCase(unittest.TestCase):
yield sleep(0)
self._check_test_key("one")
- def _test_preserve_fn(self, function):
+ def _test_run_in_background(self, function):
sentinel_context = LoggingContext.current_context()
callback_completed = [False]
- @defer.inlineCallbacks
- def cb():
+ def test():
context_one.request = "one"
- yield function()
- self._check_test_key("one")
+ d = function()
- callback_completed[0] = True
+ def cb(res):
+ self._check_test_key("one")
+ callback_completed[0] = True
+ return res
+ d.addCallback(cb)
+
+ return d
with LoggingContext() as context_one:
context_one.request = "one"
# fire off function, but don't wait on it.
- logcontext.preserve_fn(cb)()
+ logcontext.run_in_background(test)
self._check_test_key("one")
@@ -80,20 +84,30 @@ class LoggingContextTestCase(unittest.TestCase):
# test is done once d2 finishes
return d2
- def test_preserve_fn_with_blocking_fn(self):
+ def test_run_in_background_with_blocking_fn(self):
@defer.inlineCallbacks
def blocking_function():
yield sleep(0)
- return self._test_preserve_fn(blocking_function)
+ return self._test_run_in_background(blocking_function)
- def test_preserve_fn_with_non_blocking_fn(self):
+ def test_run_in_background_with_non_blocking_fn(self):
@defer.inlineCallbacks
def nonblocking_function():
with logcontext.PreserveLoggingContext():
yield defer.succeed(None)
- return self._test_preserve_fn(nonblocking_function)
+ return self._test_run_in_background(nonblocking_function)
+
+ def test_run_in_background_with_chained_deferred(self):
+ # a function which returns a deferred which looks like it has been
+ # called, but is actually paused
+ def testfunc():
+ return logcontext.make_deferred_yieldable(
+ _chained_deferred_function()
+ )
+
+ return self._test_run_in_background(testfunc)
@defer.inlineCallbacks
def test_make_deferred_yieldable(self):
@@ -119,6 +133,22 @@ class LoggingContextTestCase(unittest.TestCase):
self._check_test_key("one")
@defer.inlineCallbacks
+ def test_make_deferred_yieldable_with_chained_deferreds(self):
+ sentinel_context = LoggingContext.current_context()
+
+ with LoggingContext() as context_one:
+ context_one.request = "one"
+
+ d1 = logcontext.make_deferred_yieldable(_chained_deferred_function())
+ # make sure that the context was reset by make_deferred_yieldable
+ self.assertIs(LoggingContext.current_context(), sentinel_context)
+
+ yield d1
+
+ # now it should be restored
+ self._check_test_key("one")
+
+ @defer.inlineCallbacks
def test_make_deferred_yieldable_on_non_deferred(self):
"""Check that make_deferred_yieldable does the right thing when its
argument isn't actually a deferred"""
@@ -132,3 +162,17 @@ class LoggingContextTestCase(unittest.TestCase):
r = yield d1
self.assertEqual(r, "bum")
self._check_test_key("one")
+
+
+# a function which returns a deferred which has been "called", but
+# which has a callback on its chain that returned another, incomplete,
+# deferred, so it won't yet run any newly-added callbacks.
+def _chained_deferred_function():
+ d = defer.succeed(None)
+
+ def cb(res):
+ d2 = defer.Deferred()
+ reactor.callLater(0, d2.callback, res)
+ return d2
+ d.addCallback(cb)
+ return d
diff --git a/tests/util/test_logformatter.py b/tests/util/test_logformatter.py
new file mode 100644
index 0000000000..1a1a8412f2
--- /dev/null
+++ b/tests/util/test_logformatter.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import sys
+
+from synapse.util.logformatter import LogFormatter
+from tests import unittest
+
+
+class TestException(Exception):
+ pass
+
+
+class LogFormatterTestCase(unittest.TestCase):
+ def test_formatter(self):
+ formatter = LogFormatter()
+
+ try:
+ raise TestException("testytest")
+ except TestException:
+ ei = sys.exc_info()
+
+ output = formatter.formatException(ei)
+
+ # check the output looks vaguely sane
+ self.assertIn("testytest", output)
+ self.assertIn("Capture point", output)
|