| author    | Richard van der Hoff <richard@matrix.org> | 2019-01-31 18:43:20 +0000 |
| committer | Richard van der Hoff <richard@matrix.org> | 2019-01-31 18:43:20 +0000 |
| commit    | 625385d6841b5d2e62d83f073fd7f1af5a5aeda6 | |
| tree      | f1fe4bbd1f524985a9e61db6b15ff2c0b21b72a4 | |
| parent    | Reject large transactions on federation (#4513) | |
| parent    | v0.99.0rc3 | |
Merge branch 'release-v0.99.0' into develop
| -rw-r--r-- | CHANGES.md                                                             | 21 |
| -rw-r--r-- | README.rst                                                             | 36 |
| -rw-r--r-- | contrib/prometheus/README.md (renamed from contrib/prometheus/README) |  7 |
| -rw-r--r-- | synapse/__init__.py                                                    |  2 |
| -rw-r--r-- | synapse/handlers/sync.py                                               | 13 |
| -rw-r--r-- | synapse/storage/events_worker.py                                       | 37 |
6 files changed, 103 insertions, 13 deletions
diff --git a/CHANGES.md b/CHANGES.md
index e08b8771b8..458bbaf118 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,24 @@
+Synapse 0.99.0rc3 (2019-01-31)
+==============================
+
+Bugfixes
+--------
+
+- Fix infinite loop when an event is redacted in a v3 room ([\#4535](https://github.com/matrix-org/synapse/issues/4535))
+
+
+Improved Documentation
+----------------------
+
+- Update debian installation instructions ([\#4526](https://github.com/matrix-org/synapse/issues/4526))
+
+
+Internal Changes
+----------------
+
+- Add some debug for membership syncing issues ([\#4538](https://github.com/matrix-org/synapse/issues/4538))
+
+
 Synapse 0.99.0rc2 (2019-01-30)
 ==============================
 
diff --git a/README.rst b/README.rst
index 05a3bb3751..e6354ccba0 100644
--- a/README.rst
+++ b/README.rst
@@ -333,12 +333,38 @@ https://developer.github.com/changes/2014-04-25-user-content-security for more d
 Platform-Specific Instructions
 ==============================
 
-Debian
-------
+Debian/Ubuntu
+-------------
+
+Matrix.org packages
+~~~~~~~~~~~~~~~~~~~
+
+Matrix.org provides Debian/Ubuntu packages of the latest stable version of
+Synapse via https://matrix.org/packages/debian/. To use them::
+
+    sudo apt install -y lsb-release curl apt-transport-https
+    echo "deb https://matrix.org/packages/debian `lsb_release -cs` main" |
+        sudo tee /etc/apt/sources.list.d/matrix-org.list
+    curl "https://matrix.org/packages/debian/repo-key.asc" |
+        sudo apt-key add -
+    sudo apt update
+    sudo apt install matrix-synapse-py3
+
+Downstream Debian/Ubuntu packages
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For ``buster`` and ``sid``, Synapse is available in the Debian repositories and
+it should be possible to install it with simply::
+
+    sudo apt install matrix-synapse
+
+There is also a version of ``matrix-synapse`` in ``stretch-backports``. Please
+see the `Debian documentation on backports
+<https://backports.debian.org/Instructions/>`_ for information on how to use
+them.
 
-Matrix provides official Debian packages via apt from https://matrix.org/packages/debian/.
-Note that these packages do not include a client - choose one from
-https://matrix.org/docs/projects/try-matrix-now.html (or build your own with one of our SDKs :)
+We do not recommend using the packages in downstream Ubuntu at this time, as
+they are old and suffer from known security vulnerabilities.
 
 Fedora
 ------
diff --git a/contrib/prometheus/README b/contrib/prometheus/README.md
index 7b733172e6..e646cb7ea7 100644
--- a/contrib/prometheus/README
+++ b/contrib/prometheus/README.md
@@ -6,8 +6,10 @@ To use it, first install prometheus by following the instructions at
   http://prometheus.io/
 
 ### for Prometheus v1
+
 Add a new job to the main prometheus.conf file:
 
+```yaml
   job: {
     name: "synapse"
 
@@ -15,10 +17,12 @@ Add a new job to the main prometheus.conf file:
       target: "http://SERVER.LOCATION.HERE:PORT/_synapse/metrics"
     }
   }
+```
 
 ### for Prometheus v2
 Add a new job to the main prometheus.yml file:
 
+```yaml
   - job_name: "synapse"
     metrics_path: "/_synapse/metrics"
     # when endpoint uses https:
@@ -26,11 +30,14 @@ Add a new job to the main prometheus.yml file:
 
     static_configs:
     - targets: ['SERVER.LOCATION:PORT']
+```
 
 To use `synapse.rules` add
 
+```yaml
   rule_files:
     - "/PATH/TO/synapse-v2.rules"
+```
 
 Metrics are disabled by default when running synapse; they must be enabled
 with the 'enable-metrics' option, either in the synapse config file or as a
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 5da59aa924..e5f680bb31 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -27,4 +27,4 @@ try:
 except ImportError:
     pass
 
-__version__ = "0.99.0rc2"
+__version__ = "0.99.0rc3"
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 28857bfc1c..bd97241ab4 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -895,14 +895,17 @@ class SyncHandler(object):
         Returns:
             Deferred(SyncResult)
         """
-        logger.info("Calculating sync response for %r", sync_config.user)
-
         # NB: The now_token gets changed by some of the generate_sync_* methods,
         # this is due to some of the underlying streams not supporting the ability
         # to query up to a given point.
         # Always use the `now_token` in `SyncResultBuilder`
         now_token = yield self.event_sources.get_current_token()
 
+        logger.info(
+            "Calculating sync response for %r between %s and %s",
+            sync_config.user, since_token, now_token,
+        )
+
         user_id = sync_config.user.to_string()
         app_service = self.store.get_app_service_by_user_id(user_id)
         if app_service:
@@ -1390,6 +1393,12 @@ class SyncHandler(object):
         room_entries = []
         invited = []
         for room_id, events in iteritems(mem_change_events_by_room_id):
+            logger.info(
+                "Membership changes in %s: [%s]",
+                room_id,
+                ", ".join(("%s (%s)" % (e.event_id, e.membership) for e in events)),
+            )
+
             non_joins = [e for e in events if e.membership != Membership.JOIN]
             has_join = len(non_joins) != len(events)
 
diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py
index 57dae324c7..1716be529a 100644
--- a/synapse/storage/events_worker.py
+++ b/synapse/storage/events_worker.py
@@ -161,6 +161,12 @@ class EventsWorkerStore(SQLBaseStore):
             log_ctx = LoggingContext.current_context()
             log_ctx.record_event_fetch(len(missing_events_ids))
 
+            # Note that _enqueue_events is also responsible for turning db rows
+            # into FrozenEvents (via _get_event_from_row), which involves seeing if
+            # the events have been redacted, and if so pulling the redaction event out
+            # of the database to check it.
+            #
+            # _enqueue_events is a bit of a rubbish name but naming is hard.
             missing_events = yield self._enqueue_events(
                 missing_events_ids,
                 allow_rejected=allow_rejected,
@@ -179,14 +185,35 @@ class EventsWorkerStore(SQLBaseStore):
             # instead.
             if not allow_rejected and entry.event.type == EventTypes.Redaction:
                 if entry.event.internal_metadata.need_to_check_redaction():
-                    orig = yield self.get_event(
-                        entry.event.redacts,
+                    # XXX: we need to avoid calling get_event here.
+                    #
+                    # The problem is that we end up at this point when an event
+                    # which has been redacted is pulled out of the database by
+                    # _enqueue_events, because _enqueue_events needs to check the
+                    # redaction before it can cache the redacted event. So obviously,
+                    # calling get_event to get the redacted event out of the database
+                    # gives us an infinite loop.
+                    #
+                    # For now (quick hack to fix during 0.99 release cycle), we just
+                    # go and fetch the relevant row from the db, but it would be nice
+                    # to think about how we can cache this rather than hit the db
+                    # every time we access a redaction event.
+                    #
+                    # One thought on how to do this:
+                    #  1. split _get_events up so that it is divided into (a) get the
+                    #     rawish event from the db/cache, (b) do the redaction/rejection
+                    #     filtering
+                    #  2. have _get_event_from_row just call the first half of that
+
+                    orig_sender = yield self._simple_select_one_onecol(
+                        table="events",
+                        keyvalues={"event_id": entry.event.redacts},
+                        retcol="sender",
                         allow_none=True,
-                        allow_rejected=True,
-                        get_prev_content=False,
                     )
+
                     expected_domain = get_domain_from_id(entry.event.sender)
-                    if orig and get_domain_from_id(orig.sender) == expected_domain:
+                    if orig_sender and get_domain_from_id(orig_sender) == expected_domain:
                         # This redaction event is allowed. Mark as not needing a
                         # recheck.
                         entry.event.internal_metadata.recheck_redaction = False
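The XXX comment in the final hunk proposes a longer-term fix: split `_get_events` into (a) a raw fetch from the db/cache and (b) a separate redaction/rejection filtering pass, so that validating a redaction never re-enters the full `get_event` path. Below is a minimal, self-contained sketch of that shape. It is not Synapse's actual code; the names (`EventStoreSketch`, `_get_raw_event`, `_check_redaction`, `domain_from_id`) and the toy in-memory store are invented for illustration.

```python
# Illustrative sketch only: a toy in-memory event store, not Synapse's API.


def domain_from_id(user_id):
    # Matrix user IDs look like "@localpart:domain".
    return user_id.split(":", 1)[1]


class EventStoreSketch:
    def __init__(self, db_rows):
        # db_rows: event_id -> {"type": ..., "sender": ..., "redacts": ...}
        self.db_rows = db_rows
        self._raw_cache = {}

    def _get_raw_event(self, event_id):
        # (a) Fetch the "rawish" event from cache or db, with no redaction
        # handling at all, so it can never recurse into itself.
        if event_id not in self._raw_cache:
            self._raw_cache[event_id] = self.db_rows.get(event_id)
        return self._raw_cache[event_id]

    def _check_redaction(self, redaction):
        # (b) Redaction filtering: allow the redaction if its sender is on the
        # same domain as the sender of the event it redacts. Only the raw
        # fetch is used here, so there is no loop even when the target event
        # was itself redacted.
        target = self._get_raw_event(redaction["redacts"])
        if target is None:
            return False
        return domain_from_id(target["sender"]) == domain_from_id(redaction["sender"])

    def get_event(self, event_id):
        # Public entry point: raw fetch first, then the filtering pass.
        event = self._get_raw_event(event_id)
        if event and event["type"] == "m.room.redaction":
            event = dict(event, redaction_ok=self._check_redaction(event))
        return event


if __name__ == "__main__":
    rows = {
        "$redaction": {"type": "m.room.redaction", "sender": "@mod:example.com", "redacts": "$msg"},
        "$msg": {"type": "m.room.message", "sender": "@alice:example.com", "redacts": None},
    }
    store = EventStoreSketch(rows)
    # redaction_ok is True here: both senders are on example.com.
    print(store.get_event("$redaction"))
```

Because `_check_redaction` only ever calls the raw-fetch half, checking a redaction cannot recurse back into redaction checking; the merged commit gets the same effect in the short term by reading the target event's `sender` column directly via `_simple_select_one_onecol` instead of calling `get_event`.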