 UPGRADE.rst                                                          |  17
 changelog.d/6266.misc                                                |   1
 changelog.d/6354.feature                                             |   1
 changelog.d/6409.feature                                             |   1
 changelog.d/6451.bugfix                                              |   1
 changelog.d/6458.doc                                                 |   1
 changelog.d/6462.bugfix                                              |   1
 docs/admin_api/media_admin_api.md                                    |  17
 docs/sample_config.yaml                                              |  13
 synapse/api/constants.py                                             |   4
 synapse/config/server.py                                             |  28
 synapse/handlers/federation.py                                       |   8
 synapse/handlers/message.py                                          | 123
 synapse/logging/_terse_json.py                                       |   1
 synapse/storage/data_stores/main/end_to_end_keys.py                  |  23
 synapse/storage/data_stores/main/events.py                           | 126
 synapse/storage/data_stores/main/schema/delta/56/event_expiry.sql    |  21
 synapse/storage/data_stores/main/schema/delta/56/signing_keys.sql    |   3
 synapse/storage/data_stores/main/schema/delta/56/signing_keys_nonunique_signatures.sql | 22
 synmark/__init__.py                                                  |  72
 synmark/__main__.py                                                  |  90
 synmark/suites/__init__.py                                           |   3
 synmark/suites/logging.py                                            | 118
 tests/federation/transport/test_server.py                            |  52
 tests/rest/client/test_ephemeral_message.py                          | 101
 tox.ini                                                              |   9
 26 files changed, 827 insertions(+), 30 deletions(-)
diff --git a/UPGRADE.rst b/UPGRADE.rst
index 5ebf16a73e..d9020f2663 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -75,6 +75,23 @@ for example:
      wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
      dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 
+Upgrading to v1.7.0
+===================
+
+In an attempt to configure Synapse in a privacy-preserving way, the default
+behaviours of ``allow_public_rooms_without_auth`` and
+``allow_public_rooms_over_federation`` have been inverted. This means that by
+default, only authenticated users querying the Client/Server API will be able
+to query the room directory, and relatedly that the server will not share
+room directory information with other servers over federation.
+
+If your installation does not explicitly set these settings one way or the other
+and you want either setting to be ``true``, it will be necessary to update
+your homeserver configuration file accordingly.
+
+For more details on the surrounding context see our `explainer
+<https://matrix.org/blog/2019/11/09/avoiding-unwelcome-visitors-on-private-matrix-servers>`_.
+
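As a sketch of what that configuration update could look like for admins who want to keep the old public behaviour (assumption: you want both settings on; omit either line to keep the new private default):

```
# homeserver.yaml -- restore the pre-1.7.0 room directory behaviour
allow_public_rooms_without_auth: true
allow_public_rooms_over_federation: true
```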
 
 Upgrading to v1.5.0
 ===================
diff --git a/changelog.d/6266.misc b/changelog.d/6266.misc
new file mode 100644
index 0000000000..634e421a79
--- /dev/null
+++ b/changelog.d/6266.misc
@@ -0,0 +1 @@
+Add benchmarks for structured logging and improve output performance.
diff --git a/changelog.d/6354.feature b/changelog.d/6354.feature
new file mode 100644
index 0000000000..fed9db884b
--- /dev/null
+++ b/changelog.d/6354.feature
@@ -0,0 +1 @@
+Configure privacy preserving settings by default for the room directory.
diff --git a/changelog.d/6409.feature b/changelog.d/6409.feature
new file mode 100644
index 0000000000..653ff5a5ad
--- /dev/null
+++ b/changelog.d/6409.feature
@@ -0,0 +1 @@
+Add ephemeral messages support by partially implementing [MSC2228](https://github.com/matrix-org/matrix-doc/pull/2228).
diff --git a/changelog.d/6451.bugfix b/changelog.d/6451.bugfix
new file mode 100644
index 0000000000..23b67583ec
--- /dev/null
+++ b/changelog.d/6451.bugfix
@@ -0,0 +1 @@
+Fix uploading multiple cross-signing signatures for the same user.
diff --git a/changelog.d/6458.doc b/changelog.d/6458.doc
new file mode 100644
index 0000000000..3a9f831d89
--- /dev/null
+++ b/changelog.d/6458.doc
@@ -0,0 +1 @@
+Write some docs for the quarantine_media API.
diff --git a/changelog.d/6462.bugfix b/changelog.d/6462.bugfix
new file mode 100644
index 0000000000..c435939526
--- /dev/null
+++ b/changelog.d/6462.bugfix
@@ -0,0 +1 @@
+Fix a bug which led to exceptions being thrown in a loop when a cross-signed device is deleted.
diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md
index 5e9f8e5d84..8b3666d5f5 100644
--- a/docs/admin_api/media_admin_api.md
+++ b/docs/admin_api/media_admin_api.md
@@ -21,3 +21,20 @@ It returns a JSON body like the following:
     ]
 }
 ```
+
+# Quarantine media in a room
+
+This API 'quarantines' all the media in a room.
+
+The API is:
+
+```
+POST /_synapse/admin/v1/quarantine_media/<room_id>
+
+{}
+```
+
+Quarantining media means that it is marked as inaccessible to users. It applies
+to any local media, and to any locally-cached copies of remote media.
+
+The media file itself (and any thumbnails) is not deleted from the server.
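For illustration, the endpoint can be exercised with curl along these lines; the hostname, port, and token below are placeholders, and as with other Synapse admin APIs an access token belonging to a server admin is assumed to be required:

```
curl -X POST \
    -H "Authorization: Bearer <admin_access_token>" \
    -d '{}' \
    "http://localhost:8008/_synapse/admin/v1/quarantine_media/<room_id>"
```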
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index c7391f0c48..10664ae8f7 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -54,15 +54,16 @@ pid_file: DATADIR/homeserver.pid
 #
 #require_auth_for_profile_requests: true
 
-# If set to 'false', requires authentication to access the server's public rooms
-# directory through the client API. Defaults to 'true'.
+# If set to 'true', removes the need for authentication to access the server's
+# public rooms directory through the client API, meaning that anyone can
+# query the room directory. Defaults to 'false'.
 #
-#allow_public_rooms_without_auth: false
+#allow_public_rooms_without_auth: true
 
-# If set to 'false', forbids any other homeserver to fetch the server's public
-# rooms directory via federation. Defaults to 'true'.
+# If set to 'true', allows any other homeserver to fetch the server's public
+# rooms directory via federation. Defaults to 'false'.
 #
-#allow_public_rooms_over_federation: false
+#allow_public_rooms_over_federation: true
 
 # The default room version for newly created rooms.
 #
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index e3f086f1c3..69cef369a5 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -147,3 +147,7 @@ class EventContentFields(object):
 
     # Labels for the event, cf https://github.com/matrix-org/matrix-doc/pull/2326
     LABELS = "org.matrix.labels"
+
+    # Timestamp to delete the event after
+    # cf https://github.com/matrix-org/matrix-doc/pull/2228
+    SELF_DESTRUCT_AFTER = "org.matrix.self_destruct_after"
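As an illustrative sketch, an event content dict carrying this field might look like the following; the field name comes from the constant above, while the timestamp value is a made-up example in milliseconds since the epoch:

```
# Hypothetical content for a self-destructing message (value invented).
content = {
    "msgtype": "m.text",
    "body": "this message will expire",
    "org.matrix.self_destruct_after": 1575226800000,  # ms since epoch
}
```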
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 7a9d711669..a4bef00936 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -118,15 +118,16 @@ class ServerConfig(Config):
             self.allow_public_rooms_without_auth = False
             self.allow_public_rooms_over_federation = False
         else:
-            # If set to 'False', requires authentication to access the server's public
-            # rooms directory through the client API. Defaults to 'True'.
+            # If set to 'true', removes the need for authentication to access the server's
+            # public rooms directory through the client API, meaning that anyone can
+            # query the room directory. Defaults to 'false'.
             self.allow_public_rooms_without_auth = config.get(
-                "allow_public_rooms_without_auth", True
+                "allow_public_rooms_without_auth", False
             )
-            # If set to 'False', forbids any other homeserver to fetch the server's public
-            # rooms directory via federation. Defaults to 'True'.
+            # If set to 'true', allows any other homeserver to fetch the server's public
+            # rooms directory via federation. Defaults to 'false'.
             self.allow_public_rooms_over_federation = config.get(
-                "allow_public_rooms_over_federation", True
+                "allow_public_rooms_over_federation", False
             )
 
         default_room_version = config.get("default_room_version", DEFAULT_ROOM_VERSION)
@@ -490,6 +491,8 @@ class ServerConfig(Config):
             "cleanup_extremities_with_dummy_events", True
         )
 
+        self.enable_ephemeral_messages = config.get("enable_ephemeral_messages", False)
+
     def has_tls_listener(self) -> bool:
         return any(l["tls"] for l in self.listeners)
 
@@ -618,15 +621,16 @@ class ServerConfig(Config):
         #
         #require_auth_for_profile_requests: true
 
-        # If set to 'false', requires authentication to access the server's public rooms
-        # directory through the client API. Defaults to 'true'.
+        # If set to 'true', removes the need for authentication to access the server's
+        # public rooms directory through the client API, meaning that anyone can
+        # query the room directory. Defaults to 'false'.
         #
-        #allow_public_rooms_without_auth: false
+        #allow_public_rooms_without_auth: true
 
-        # If set to 'false', forbids any other homeserver to fetch the server's public
-        # rooms directory via federation. Defaults to 'true'.
+        # If set to 'true', allows any other homeserver to fetch the server's public
+        # rooms directory via federation. Defaults to 'false'.
         #
-        #allow_public_rooms_over_federation: false
+        #allow_public_rooms_over_federation: true
 
         # The default room version for newly created rooms.
         #
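Note that the new `enable_ephemeral_messages` option read above is not written into the generated sample config by this diff; a minimal sketch of enabling it in homeserver.yaml (it defaults to false):

```
# Experimental support for MSC2228 self-destructing events; off by default.
enable_ephemeral_messages: true
```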
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index d3267734f7..d9d0cd9eef 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -121,6 +121,7 @@ class FederationHandler(BaseHandler):
         self.pusher_pool = hs.get_pusherpool()
         self.spam_checker = hs.get_spam_checker()
         self.event_creation_handler = hs.get_event_creation_handler()
+        self._message_handler = hs.get_message_handler()
         self._server_notices_mxid = hs.config.server_notices_mxid
         self.config = hs.config
         self.http_client = hs.get_simple_http_client()
@@ -141,6 +142,8 @@ class FederationHandler(BaseHandler):
 
         self.third_party_event_rules = hs.get_third_party_event_rules()
 
+        self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages
+
     @defer.inlineCallbacks
     def on_receive_pdu(self, origin, pdu, sent_to_us_directly=False):
         """ Process a PDU received via a federation /send/ transaction, or
@@ -2715,6 +2718,11 @@ class FederationHandler(BaseHandler):
                 event_and_contexts, backfilled=backfilled
             )
 
+            if self._ephemeral_messages_enabled:
+                for (event, context) in event_and_contexts:
+                    # If there's an expiry timestamp on the event, schedule its expiry.
+                    self._message_handler.maybe_schedule_expiry(event)
+
             if not backfilled:  # Never notify for backfilled events
                 for event, _ in event_and_contexts:
                     yield self._notify_persisted_event(event, max_stream_id)
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 3b0156f516..4f53a5f5dc 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -15,6 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
+from typing import Optional
 
 from six import iteritems, itervalues, string_types
 
@@ -22,9 +23,16 @@ from canonicaljson import encode_canonical_json, json
 
 from twisted.internet import defer
 from twisted.internet.defer import succeed
+from twisted.internet.interfaces import IDelayedCall
 
 from synapse import event_auth
-from synapse.api.constants import EventTypes, Membership, RelationTypes, UserTypes
+from synapse.api.constants import (
+    EventContentFields,
+    EventTypes,
+    Membership,
+    RelationTypes,
+    UserTypes,
+)
 from synapse.api.errors import (
     AuthError,
     Codes,
@@ -62,6 +70,17 @@ class MessageHandler(object):
         self.storage = hs.get_storage()
         self.state_store = self.storage.state
         self._event_serializer = hs.get_event_client_serializer()
+        self._ephemeral_events_enabled = hs.config.enable_ephemeral_messages
+        self._is_worker_app = bool(hs.config.worker_app)
+
+        # The scheduled call to self._expire_event. None if no call is currently
+        # scheduled.
+        self._scheduled_expiry = None  # type: Optional[IDelayedCall]
+
+        if not hs.config.worker_app:
+            run_as_background_process(
+                "_schedule_next_expiry", self._schedule_next_expiry
+            )
 
     @defer.inlineCallbacks
     def get_room_data(
@@ -225,6 +244,100 @@ class MessageHandler(object):
             for user_id, profile in iteritems(users_with_profile)
         }
 
+    def maybe_schedule_expiry(self, event):
+        """Schedule the expiry of an event if there's not already one scheduled,
+        or if the one running is for an event that will expire after the provided
+        timestamp.
+
+        This function needs to invalidate the event cache, which is only possible on
+        the master process, and therefore needs to be run there.
+
+        Args:
+            event (EventBase): The event to schedule the expiry of.
+        """
+        assert not self._is_worker_app
+
+        expiry_ts = event.content.get(EventContentFields.SELF_DESTRUCT_AFTER)
+        if not isinstance(expiry_ts, int) or event.is_state():
+            return
+
+        # _schedule_expiry_for_event won't actually schedule anything if there's already
+        # a task scheduled for a timestamp that's sooner than the provided one.
+        self._schedule_expiry_for_event(event.event_id, expiry_ts)
+
+    @defer.inlineCallbacks
+    def _schedule_next_expiry(self):
+        """Retrieve the ID and the expiry timestamp of the next event to be expired,
+        and schedule an expiry task for it.
+
+        If there's no event left to expire, leave _scheduled_expiry set to None so
+        that a future call to maybe_schedule_expiry can schedule a new expiry task.
+        """
+        # Try to get the expiry timestamp of the next event to expire.
+        res = yield self.store.get_next_event_to_expire()
+        if res:
+            event_id, expiry_ts = res
+            self._schedule_expiry_for_event(event_id, expiry_ts)
+
+    def _schedule_expiry_for_event(self, event_id, expiry_ts):
+        """Schedule an expiry task for the provided event if there's not already one
+        scheduled at a timestamp that's sooner than the provided one.
+
+        Args:
+            event_id (str): The ID of the event to expire.
+            expiry_ts (int): The timestamp at which to expire the event.
+        """
+        if self._scheduled_expiry:
+            # If the provided timestamp refers to a time before the scheduled time of the
+            # next expiry task, cancel that task and reschedule it for this timestamp.
+            next_scheduled_expiry_ts = self._scheduled_expiry.getTime() * 1000
+            if expiry_ts < next_scheduled_expiry_ts:
+                self._scheduled_expiry.cancel()
+            else:
+                return
+
+        # Figure out how many seconds we need to wait before expiring the event.
+        now_ms = self.clock.time_msec()
+        delay = (expiry_ts - now_ms) / 1000
+
+        # callLater doesn't support negative delays, so clamp the delay to 0 if the
+        # expiry timestamp is already in the past.
+        if delay < 0:
+            delay = 0
+
+        logger.info("Scheduling expiry for event %s in %.3fs", event_id, delay)
+
+        self._scheduled_expiry = self.clock.call_later(
+            delay,
+            run_as_background_process,
+            "_expire_event",
+            self._expire_event,
+            event_id,
+        )
+
+    @defer.inlineCallbacks
+    def _expire_event(self, event_id):
+        """Retrieve and expire an event that needs to be expired from the database.
+
+        If the event doesn't exist in the database, log it and delete the expiry date
+        from the database (so that we don't try to expire it again).
+        """
+        assert self._ephemeral_events_enabled
+
+        self._scheduled_expiry = None
+
+        logger.info("Expiring event %s", event_id)
+
+        try:
+            # Expire the event if we know about it. This function also deletes the expiry
+            # date from the database in the same database transaction.
+            yield self.store.expire_event(event_id)
+        except Exception as e:
+            logger.error("Could not expire event %s: %r", event_id, e)
+
+        # Schedule the expiry of the next event to expire.
+        yield self._schedule_next_expiry()
+
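The unit conversions above are easy to misread; here is a worked sketch with invented values, assuming `clock.time_msec()` returns milliseconds and Twisted's `IDelayedCall.getTime()` returns seconds since the epoch:

```
# Worked example of the conversions in _schedule_expiry_for_event.
now_ms = 1575226800000               # clock.time_msec(), in milliseconds
expiry_ts = now_ms + 1000            # expire one second from now, in milliseconds
delay = (expiry_ts - now_ms) / 1000  # callLater takes seconds -> 1.0

scheduled_s = 1575226801.0           # IDelayedCall.getTime(), seconds since epoch
scheduled_ms = scheduled_s * 1000    # converted back to ms for the comparison
```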
 
 # The duration (in ms) after which rooms should be removed
 # `_rooms_to_exclude_from_dummy_event_insertion` (with the effect that we will try
@@ -295,6 +408,10 @@ class EventCreationHandler(object):
                 5 * 60 * 1000,
             )
 
+        self._message_handler = hs.get_message_handler()
+
+        self._ephemeral_events_enabled = hs.config.enable_ephemeral_messages
+
     @defer.inlineCallbacks
     def create_event(
         self,
@@ -877,6 +994,10 @@ class EventCreationHandler(object):
             event, context=context
         )
 
+        if self._ephemeral_events_enabled:
+            # If there's an expiry timestamp on the event, schedule its expiry.
+            self._message_handler.maybe_schedule_expiry(event)
+
         yield self.pusher_pool.on_new_notifications(event_stream_id, max_stream_id)
 
         def _notify():
diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py
index 05fc64f409..03934956f4 100644
--- a/synapse/logging/_terse_json.py
+++ b/synapse/logging/_terse_json.py
@@ -256,6 +256,7 @@ class TerseJSONToTCPLogObserver(object):
             # transport is the same, just trigger a resumeProducing.
             if self._producer and r.transport is self._producer.transport:
                 self._producer.resumeProducing()
+                self._connection_waiter = None
                 return
 
             # If the producer is still producing, stop it.
diff --git a/synapse/storage/data_stores/main/end_to_end_keys.py b/synapse/storage/data_stores/main/end_to_end_keys.py
index d8ad59ad93..643327b57b 100644
--- a/synapse/storage/data_stores/main/end_to_end_keys.py
+++ b/synapse/storage/data_stores/main/end_to_end_keys.py
@@ -145,13 +145,28 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
         txn.execute(signature_sql, signature_query_params)
         rows = self.cursor_to_dict(txn)
 
+        # Add each cross-signing signature to the correct device in the result dict.
         for row in rows:
+            signing_user_id = row["user_id"]
+            signing_key_id = row["key_id"]
             target_user_id = row["target_user_id"]
             target_device_id = row["target_device_id"]
-            if target_user_id in result and target_device_id in result[target_user_id]:
-                result[target_user_id][target_device_id].setdefault(
-                    "signatures", {}
-                ).setdefault(row["user_id"], {})[row["key_id"]] = row["signature"]
+            signature = row["signature"]
+
+            target_user_result = result.get(target_user_id)
+            if not target_user_result:
+                continue
+
+            target_device_result = target_user_result.get(target_device_id)
+            if not target_device_result:
+                # note that target_device_result will be None for deleted devices.
+                continue
+
+            target_device_signatures = target_device_result.setdefault("signatures", {})
+            signing_user_signatures = target_device_signatures.setdefault(
+                signing_user_id, {}
+            )
+            signing_user_signatures[signing_key_id] = signature
 
         log_kv(result)
         return result
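For reference, a sketch of the nested structure the loop above produces; all identifiers and values here are invented examples:

```
result = {
    "@target:example.com": {                 # target_user_id
        "DEVICEID": {                        # target_device_id
            # ...device key fields returned by the main query...
            "signatures": {
                "@signer:example.com": {     # signing_user_id
                    "ed25519:ABCDEF": "<base64 signature>",  # signing_key_id
                },
            },
        },
    },
}
```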
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py
index 2737a1d3ae..79c91fe284 100644
--- a/synapse/storage/data_stores/main/events.py
+++ b/synapse/storage/data_stores/main/events.py
@@ -130,6 +130,8 @@ class EventsStore(
         if self.hs.config.redaction_retention_period is not None:
             hs.get_clock().looping_call(_censor_redactions, 5 * 60 * 1000)
 
+        self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages
+
     @defer.inlineCallbacks
     def _read_forward_extremities(self):
         def fetch(txn):
@@ -940,6 +942,12 @@ class EventsStore(
                     txn, event.event_id, labels, event.room_id, event.depth
                 )
 
+            if self._ephemeral_messages_enabled:
+                # If there's an expiry timestamp on the event, store it.
+                expiry_ts = event.content.get(EventContentFields.SELF_DESTRUCT_AFTER)
+                if isinstance(expiry_ts, int) and not event.is_state():
+                    self._insert_event_expiry_txn(txn, event.event_id, expiry_ts)
+
         # Insert into the room_memberships table.
         self._store_room_members_txn(
             txn,
@@ -1101,12 +1109,7 @@ class EventsStore(
         def _update_censor_txn(txn):
             for redaction_id, event_id, pruned_json in updates:
                 if pruned_json:
-                    self._simple_update_one_txn(
-                        txn,
-                        table="event_json",
-                        keyvalues={"event_id": event_id},
-                        updatevalues={"json": pruned_json},
-                    )
+                    self._censor_event_txn(txn, event_id, pruned_json)
 
                 self._simple_update_one_txn(
                     txn,
@@ -1117,6 +1120,22 @@ class EventsStore(
 
         yield self.runInteraction("_update_censor_txn", _update_censor_txn)
 
+    def _censor_event_txn(self, txn, event_id, pruned_json):
+        """Censor an event by replacing its JSON in the event_json table with the
+        provided pruned JSON.
+
+        Args:
+            txn (LoggingTransaction): The database transaction.
+            event_id (str): The ID of the event to censor.
+            pruned_json (str): The pruned event JSON.
+        """
+        self._simple_update_one_txn(
+            txn,
+            table="event_json",
+            keyvalues={"event_id": event_id},
+            updatevalues={"json": pruned_json},
+        )
+
     @defer.inlineCallbacks
     def count_daily_messages(self):
         """
@@ -1957,6 +1976,101 @@ class EventsStore(
             ],
         )
 
+    def _insert_event_expiry_txn(self, txn, event_id, expiry_ts):
+        """Save the expiry timestamp associated with a given event ID.
+
+        Args:
+            txn (LoggingTransaction): The database transaction to use.
+            event_id (str): The event ID the expiry timestamp is associated with.
+            expiry_ts (int): The timestamp at which to expire (delete) the event.
+        """
+        return self._simple_insert_txn(
+            txn=txn,
+            table="event_expiry",
+            values={"event_id": event_id, "expiry_ts": expiry_ts},
+        )
+
+    @defer.inlineCallbacks
+    def expire_event(self, event_id):
+        """Retrieve and expire an event that has expired, and delete its associated
+        expiry timestamp. If the event can't be retrieved, delete its associated
+        timestamp so we don't try to expire it again in the future.
+
+        Args:
+             event_id (str): The ID of the event to delete.
+        """
+        # Try to retrieve the event's content from the database or the event cache.
+        event = yield self.get_event(event_id)
+
+        def delete_expired_event_txn(txn):
+            # Delete the expiry timestamp associated with this event from the database.
+            self._delete_event_expiry_txn(txn, event_id)
+
+            if not event:
+                # If we can't find the event, log a warning and delete the expiry date
+                # from the database so that we don't try to expire it again in the
+                # future.
+                logger.warning(
+                    "Can't expire event %s because we don't have it.", event_id
+                )
+                return
+
+            # Prune the event's dict then convert it to JSON.
+            pruned_json = encode_json(prune_event_dict(event.get_dict()))
+
+            # Update the event_json table to replace the event's JSON with the pruned
+            # JSON.
+            self._censor_event_txn(txn, event.event_id, pruned_json)
+
+            # We need to invalidate the event cache entry for this event because we
+            # changed its content in the database. We can't call
+            # self._invalidate_cache_and_stream because self._get_event_cache isn't of
+            # the right type.
+            txn.call_after(self._get_event_cache.invalidate, (event.event_id,))
+            # Send that invalidation to replication so that other workers also invalidate
+            # the event cache.
+            self._send_invalidation_to_replication(
+                txn, "_get_event_cache", (event.event_id,)
+            )
+
+        yield self.runInteraction("delete_expired_event", delete_expired_event_txn)
+
+    def _delete_event_expiry_txn(self, txn, event_id):
+        """Delete the expiry timestamp associated with an event ID without deleting the
+        actual event.
+
+        Args:
+            txn (LoggingTransaction): The transaction to use to perform the deletion.
+            event_id (str): The ID of the event whose expiry timestamp to delete.
+        """
+        return self._simple_delete_txn(
+            txn=txn, table="event_expiry", keyvalues={"event_id": event_id}
+        )
+
+    def get_next_event_to_expire(self):
+        """Retrieve the entry with the lowest expiry timestamp in the event_expiry
+        table, or None if there's no more event to expire.
+
+        Returns: Deferred[Optional[Tuple[str, int]]]
+            A tuple containing the event ID as its first element and an expiry timestamp
+            as its second one, if there's at least one row in the event_expiry table.
+            None otherwise.
+        """
+
+        def get_next_event_to_expire_txn(txn):
+            txn.execute(
+                """
+                SELECT event_id, expiry_ts FROM event_expiry
+                ORDER BY expiry_ts ASC LIMIT 1
+                """
+            )
+
+            return txn.fetchone()
+
+        return self.runInteraction(
+            desc="get_next_event_to_expire", func=get_next_event_to_expire_txn
+        )
+
 
 AllNewEventsResult = namedtuple(
     "AllNewEventsResult",
diff --git a/synapse/storage/data_stores/main/schema/delta/56/event_expiry.sql b/synapse/storage/data_stores/main/schema/delta/56/event_expiry.sql
new file mode 100644
index 0000000000..81a36a8b1d
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/56/event_expiry.sql
@@ -0,0 +1,21 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS event_expiry (
+    event_id TEXT PRIMARY KEY,
+    expiry_ts BIGINT NOT NULL
+);
+
+CREATE INDEX event_expiry_expiry_ts_idx ON event_expiry(expiry_ts);
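For illustration, pending expiries could be inspected with a query along these lines (a hypothetical debugging query, not part of this change):

```
-- List the next few events due to expire, soonest first.
SELECT event_id, expiry_ts FROM event_expiry ORDER BY expiry_ts ASC LIMIT 5;
```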
diff --git a/synapse/storage/data_stores/main/schema/delta/56/signing_keys.sql b/synapse/storage/data_stores/main/schema/delta/56/signing_keys.sql
index 27a96123e3..5c5fffcafb 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/signing_keys.sql
+++ b/synapse/storage/data_stores/main/schema/delta/56/signing_keys.sql
@@ -40,7 +40,8 @@ CREATE TABLE IF NOT EXISTS e2e_cross_signing_signatures (
     signature TEXT NOT NULL
 );
 
-CREATE UNIQUE INDEX e2e_cross_signing_signatures_idx ON e2e_cross_signing_signatures(user_id, target_user_id, target_device_id);
+-- replaced by the index created in signing_keys_nonunique_signatures.sql
+-- CREATE UNIQUE INDEX e2e_cross_signing_signatures_idx ON e2e_cross_signing_signatures(user_id, target_user_id, target_device_id);
 
 -- stream of user signature updates
 CREATE TABLE IF NOT EXISTS user_signature_stream (
diff --git a/synapse/storage/data_stores/main/schema/delta/56/signing_keys_nonunique_signatures.sql b/synapse/storage/data_stores/main/schema/delta/56/signing_keys_nonunique_signatures.sql
new file mode 100644
index 0000000000..0aa90ebf0c
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/56/signing_keys_nonunique_signatures.sql
@@ -0,0 +1,22 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* The cross-signing signatures index should not be a unique index, because a
+ * user may upload multiple signatures for the same target user. The previous
+ * index was unique, so delete it if it's there and create a new non-unique
+ * index. */
+
+DROP INDEX IF EXISTS e2e_cross_signing_signatures_idx;
+CREATE INDEX IF NOT EXISTS e2e_cross_signing_signatures2_idx ON e2e_cross_signing_signatures(user_id, target_user_id, target_device_id);
diff --git a/synmark/__init__.py b/synmark/__init__.py
new file mode 100644
index 0000000000..570eb818d9
--- /dev/null
+++ b/synmark/__init__.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+from twisted.internet import epollreactor
+from twisted.internet.main import installReactor
+
+from synapse.config.homeserver import HomeServerConfig
+from synapse.util import Clock
+
+from tests.utils import default_config, setup_test_homeserver
+
+
+async def make_homeserver(reactor, config=None):
+    """
+    Make a Homeserver suitable for running benchmarks against.
+
+    Args:
+        reactor: A Twisted reactor to run under.
+        config: A HomeServerConfig to use, or None.
+    """
+    cleanup_tasks = []
+    clock = Clock(reactor)
+
+    if not config:
+        config = default_config("test")
+
+    config_obj = HomeServerConfig()
+    config_obj.parse_config_dict(config, "", "")
+
+    hs = await setup_test_homeserver(
+        cleanup_tasks.append, config=config_obj, reactor=reactor, clock=clock
+    )
+    stor = hs.get_datastore()
+
+    # Run the database background updates.
+    if hasattr(stor, "do_next_background_update"):
+        while not await stor.has_completed_background_updates():
+            await stor.do_next_background_update(1)
+
+    def cleanup():
+        for i in cleanup_tasks:
+            i()
+
+    return hs, clock.sleep, cleanup
+
+
+def make_reactor():
+    """
+    Instantiate and install a Twisted reactor suitable for testing (i.e. not the
+    default global one).
+    """
+    reactor = epollreactor.EPollReactor()
+
+    if "twisted.internet.reactor" in sys.modules:
+        del sys.modules["twisted.internet.reactor"]
+    installReactor(reactor)
+
+    return reactor
diff --git a/synmark/__main__.py b/synmark/__main__.py
new file mode 100644
index 0000000000..ac59befbd4
--- /dev/null
+++ b/synmark/__main__.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+from contextlib import redirect_stderr
+from io import StringIO
+
+import pyperf
+from synmark import make_reactor
+from synmark.suites import SUITES
+
+from twisted.internet.defer import ensureDeferred
+from twisted.logger import globalLogBeginner, textFileLogObserver
+from twisted.python.failure import Failure
+
+from tests.utils import setupdb
+
+
+def make_test(main):
+    """
+    Take a benchmark function and wrap it in a reactor start and stop.
+    """
+
+    def _main(loops):
+
+        reactor = make_reactor()
+
+        file_out = StringIO()
+        with redirect_stderr(file_out):
+
+            d = ensureDeferred(main(reactor, loops))
+
+            def on_done(_):
+                if isinstance(_, Failure):
+                    _.printTraceback()
+                    print(file_out.getvalue())
+                reactor.stop()
+                return _
+
+            d.addBoth(on_done)
+            reactor.run()
+
+        return d.result
+
+    return _main
+
+
+if __name__ == "__main__":
+
+    def add_cmdline_args(cmd, args):
+        if args.log:
+            cmd.extend(["--log"])
+
+    runner = pyperf.Runner(
+        processes=3, min_time=2, show_name=True, add_cmdline_args=add_cmdline_args
+    )
+    runner.argparser.add_argument("--log", action="store_true")
+    runner.parse_args()
+
+    orig_loops = runner.args.loops
+    runner.args.inherit_environ = ["SYNAPSE_POSTGRES"]
+
+    if runner.args.worker:
+        if runner.args.log:
+            globalLogBeginner.beginLoggingTo(
+                [textFileLogObserver(sys.__stdout__)], redirectStandardIO=False
+            )
+        setupdb()
+
+    for suite, loops in SUITES:
+        if loops:
+            runner.args.loops = loops
+        else:
+            runner.args.loops = orig_loops
+            loops = "auto"
+        runner.bench_time_func(
+            suite.__name__ + "_" + str(loops), make_test(suite.main),
+        )
diff --git a/synmark/suites/__init__.py b/synmark/suites/__init__.py
new file mode 100644
index 0000000000..cfa3b0ba38
--- /dev/null
+++ b/synmark/suites/__init__.py
@@ -0,0 +1,3 @@
+from . import logging
+
+SUITES = [(logging, 1000), (logging, 10000), (logging, None)]
diff --git a/synmark/suites/logging.py b/synmark/suites/logging.py
new file mode 100644
index 0000000000..d8e4c7d58f
--- /dev/null
+++ b/synmark/suites/logging.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import warnings
+from io import StringIO
+
+from mock import Mock
+
+from pyperf import perf_counter
+from synmark import make_homeserver
+
+from twisted.internet.defer import Deferred
+from twisted.internet.protocol import ServerFactory
+from twisted.logger import LogBeginner, Logger, LogPublisher
+from twisted.protocols.basic import LineOnlyReceiver
+
+from synapse.logging._structured import setup_structured_logging
+
+
+class LineCounter(LineOnlyReceiver):
+
+    delimiter = b"\n"
+
+    def __init__(self, *args, **kwargs):
+        self.count = 0
+        super().__init__(*args, **kwargs)
+
+    def lineReceived(self, line):
+        self.count += 1
+
+        if self.count >= self.factory.wait_for and self.factory.on_done:
+            on_done = self.factory.on_done
+            self.factory.on_done = None
+            on_done.callback(True)
+
+
+async def main(reactor, loops):
+    """
+    Benchmark how long it takes to send `loops` messages.
+    """
+    servers = []
+
+    def protocol():
+        p = LineCounter()
+        servers.append(p)
+        return p
+
+    logger_factory = ServerFactory.forProtocol(protocol)
+    logger_factory.wait_for = loops
+    logger_factory.on_done = Deferred()
+    port = reactor.listenTCP(0, logger_factory, interface="127.0.0.1")
+
+    hs, wait, cleanup = await make_homeserver(reactor)
+
+    errors = StringIO()
+    publisher = LogPublisher()
+    mock_sys = Mock()
+    beginner = LogBeginner(
+        publisher, errors, mock_sys, warnings, initialBufferSize=loops
+    )
+
+    log_config = {
+        "loggers": {"synapse": {"level": "DEBUG"}},
+        "drains": {
+            "tersejson": {
+                "type": "network_json_terse",
+                "host": "127.0.0.1",
+                "port": port.getHost().port,
+                "maximum_buffer": 100,
+            }
+        },
+    }
+
+    logger = Logger(namespace="synapse.logging.test_terse_json", observer=publisher)
+    logging_system = setup_structured_logging(
+        hs, hs.config, log_config, logBeginner=beginner, redirect_stdlib_logging=False
+    )
+
+    # Wait for it to connect...
+    await logging_system._observers[0]._service.whenConnected()
+
+    start = perf_counter()
+
+    # Send a bunch of useful messages
+    for i in range(0, loops):
+        logger.info("test message %s" % (i,))
+
+        if (
+            len(logging_system._observers[0]._buffer)
+            == logging_system._observers[0].maximum_buffer
+        ):
+            while (
+                len(logging_system._observers[0]._buffer)
+                > logging_system._observers[0].maximum_buffer / 2
+            ):
+                await wait(0.01)
+
+    await logger_factory.on_done
+
+    end = perf_counter() - start
+
+    logging_system.stop()
+    port.stopListening()
+    cleanup()
+
+    return end
diff --git a/tests/federation/transport/test_server.py b/tests/federation/transport/test_server.py
new file mode 100644
index 0000000000..27d83bb7d9
--- /dev/null
+++ b/tests/federation/transport/test_server.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from twisted.internet import defer
+
+from synapse.config.ratelimiting import FederationRateLimitConfig
+from synapse.federation.transport import server
+from synapse.util.ratelimitutils import FederationRateLimiter
+
+from tests import unittest
+from tests.unittest import override_config
+
+
+class RoomDirectoryFederationTests(unittest.HomeserverTestCase):
+    def prepare(self, reactor, clock, homeserver):
+        class Authenticator(object):
+            def authenticate_request(self, request, content):
+                return defer.succeed("otherserver.nottld")
+
+        ratelimiter = FederationRateLimiter(clock, FederationRateLimitConfig())
+        server.register_servlets(
+            homeserver, self.resource, Authenticator(), ratelimiter
+        )
+
+    @override_config({"allow_public_rooms_over_federation": False})
+    def test_blocked_public_room_list_over_federation(self):
+        request, channel = self.make_request(
+            "GET", "/_matrix/federation/v1/publicRooms"
+        )
+        self.render(request)
+        self.assertEquals(403, channel.code)
+
+    @override_config({"allow_public_rooms_over_federation": True})
+    def test_open_public_room_list_over_federation(self):
+        request, channel = self.make_request(
+            "GET", "/_matrix/federation/v1/publicRooms"
+        )
+        self.render(request)
+        self.assertEquals(200, channel.code)
diff --git a/tests/rest/client/test_ephemeral_message.py b/tests/rest/client/test_ephemeral_message.py
new file mode 100644
index 0000000000..5e9c07ebf3
--- /dev/null
+++ b/tests/rest/client/test_ephemeral_message.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from synapse.api.constants import EventContentFields, EventTypes
+from synapse.rest import admin
+from synapse.rest.client.v1 import room
+
+from tests import unittest
+
+
+class EphemeralMessageTestCase(unittest.HomeserverTestCase):
+
+    user_id = "@user:test"
+
+    servlets = [
+        admin.register_servlets,
+        room.register_servlets,
+    ]
+
+    def make_homeserver(self, reactor, clock):
+        config = self.default_config()
+
+        config["enable_ephemeral_messages"] = True
+
+        self.hs = self.setup_test_homeserver(config=config)
+        return self.hs
+
+    def prepare(self, reactor, clock, homeserver):
+        self.room_id = self.helper.create_room_as(self.user_id)
+
+    def test_message_expiry_no_delay(self):
+        """Tests that sending a message sent with a m.self_destruct_after field set to the
+        past results in that event being deleted right away.
+        """
+        # Send a message in the room with an expiry timestamp in the past. At this
+        # point the reactor clock is at 200ms, so 0 is in the past; even if the clock
+        # were at 0ms, the code path would be the same, since an event whose expiry
+        # timestamp equals the current timestamp is also expired immediately.
+        res = self.helper.send_event(
+            room_id=self.room_id,
+            type=EventTypes.Message,
+            content={
+                "msgtype": "m.text",
+                "body": "hello",
+                EventContentFields.SELF_DESTRUCT_AFTER: 0,
+            },
+        )
+        event_id = res["event_id"]
+
+        # Check that we can't retrieve the content of the event.
+        event_content = self.get_event(self.room_id, event_id)["content"]
+        self.assertFalse(bool(event_content), event_content)
+
+    def test_message_expiry_delay(self):
+        """Tests that sending a message with a m.self_destruct_after field set to the
+        future results in that event not being deleted right away, but advancing the
+        clock to after that expiry timestamp causes the event to be deleted.
+        """
+        # Send a message in the room that'll expire in 1s.
+        res = self.helper.send_event(
+            room_id=self.room_id,
+            type=EventTypes.Message,
+            content={
+                "msgtype": "m.text",
+                "body": "hello",
+                EventContentFields.SELF_DESTRUCT_AFTER: self.clock.time_msec() + 1000,
+            },
+        )
+        event_id = res["event_id"]
+
+        # Check that we can retrieve the content of the event before it has expired.
+        event_content = self.get_event(self.room_id, event_id)["content"]
+        self.assertTrue(bool(event_content), event_content)
+
+        # Advance the clock to after the deletion.
+        self.reactor.advance(1)
+
+        # Check that we can't retrieve the content of the event anymore.
+        event_content = self.get_event(self.room_id, event_id)["content"]
+        self.assertFalse(bool(event_content), event_content)
+
+    def get_event(self, room_id, event_id, expected_code=200):
+        url = "/_matrix/client/r0/rooms/%s/event/%s" % (room_id, event_id)
+
+        request, channel = self.make_request("GET", url)
+        self.render(request)
+
+        self.assertEqual(channel.code, expected_code, channel.result)
+
+        return channel.json_body
diff --git a/tox.ini b/tox.ini
index 62b350ea6a..903a245fb0 100644
--- a/tox.ini
+++ b/tox.ini
@@ -102,6 +102,15 @@ commands =
 
     {envbindir}/coverage run "{envbindir}/trial"  {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}
 
+[testenv:benchmark]
+deps =
+    {[base]deps}
+    pyperf
+setenv =
+    SYNAPSE_POSTGRES = 1
+commands =
+    python -m synmark {posargs:}
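With this environment in place, the benchmarks can presumably be run with `tox -e benchmark`, and extra arguments are forwarded to `python -m synmark` via `{posargs:}` (for example, `tox -e benchmark -- --log` to enable the `--log` flag registered in `synmark/__main__.py` above). Note that the `SYNAPSE_POSTGRES = 1` setting implies a PostgreSQL database is expected to be available for the run.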
+
 [testenv:packaging]
 skip_install=True
 deps =