-rw-r--r-- | .travis.yml | 11
-rw-r--r-- | changelog.d/4006.misc | 1
-rw-r--r-- | changelog.d/4091.feature | 1
-rw-r--r-- | changelog.d/4095.bugfix | 1
-rw-r--r-- | changelog.d/4099.feature | 1
-rw-r--r-- | changelog.d/4100.feature | 1
-rw-r--r-- | changelog.d/4101.feature | 1
-rw-r--r-- | changelog.d/4106.removal | 1
-rw-r--r-- | changelog.d/4108.misc | 1
-rw-r--r-- | changelog.d/4109.misc | 1
-rw-r--r-- | changelog.d/4110.misc | 1
-rw-r--r-- | changelog.d/4118.removal | 1
-rw-r--r-- | changelog.d/4119.removal | 1
-rw-r--r-- | changelog.d/4120.removal | 1
-rw-r--r-- | changelog.d/4121.misc | 1
-rw-r--r-- | changelog.d/4122.bugfix | 1
-rw-r--r-- | changelog.d/4124.misc | 1
-rw-r--r-- | contrib/docker/docker-compose.yml | 2
-rwxr-xr-x | jenkins/prepare_synapse.sh | 19
-rwxr-xr-x | scripts-dev/make_identicons.pl | 39
-rw-r--r-- | synapse/api/constants.py | 1
-rw-r--r-- | synapse/api/urls.py | 1
-rwxr-xr-x | synapse/app/homeserver.py | 7
-rw-r--r-- | synapse/crypto/keyclient.py | 8
-rw-r--r-- | synapse/crypto/keyring.py | 110
-rw-r--r-- | synapse/federation/federation_server.py | 5
-rw-r--r-- | synapse/federation/transport/server.py | 9
-rw-r--r-- | synapse/handlers/directory.py | 34
-rw-r--r-- | synapse/handlers/message.py | 3
-rw-r--r-- | synapse/handlers/register.py | 9
-rw-r--r-- | synapse/handlers/room.py | 401
-rw-r--r-- | synapse/push/emailpusher.py | 5
-rw-r--r-- | synapse/push/mailer.py | 10
-rw-r--r-- | synapse/python_dependencies.py | 1
-rw-r--r-- | synapse/replication/tcp/client.py | 2
-rw-r--r-- | synapse/replication/tcp/protocol.py | 4
-rw-r--r-- | synapse/rest/__init__.py | 2
-rw-r--r-- | synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py | 89
-rw-r--r-- | synapse/rest/key/v1/__init__.py | 14
-rw-r--r-- | synapse/rest/key/v1/server_key_resource.py | 92
-rw-r--r-- | synapse/rest/media/v1/identicon_resource.py | 68
-rw-r--r-- | synapse/rest/media/v1/media_repository.py | 2
-rw-r--r-- | synapse/server.py | 5
-rw-r--r-- | synapse/server.pyi | 6
-rw-r--r-- | synapse/storage/devices.py | 6
-rw-r--r-- | synapse/storage/events.py | 157
-rw-r--r-- | synapse/storage/prepare_database.py | 2
-rw-r--r-- | synapse/storage/room.py | 2
-rw-r--r-- | synapse/storage/schema/delta/52/add_event_to_state_group_index.sql | 19
-rw-r--r-- | synapse/storage/state.py | 7
-rw-r--r-- | tests/push/__init__.py | 0
-rw-r--r-- | tests/push/test_email.py | 148
-rw-r--r-- | tests/server.py | 4
-rw-r--r-- | tests/server_notices/test_resource_limits_server_notices.py | 10
-rw-r--r-- | tests/test_mau.py | 2
-rw-r--r-- | tests/unittest.py | 9
-rw-r--r-- | tox.ini | 14
57 files changed, 871 insertions, 484 deletions
diff --git a/.travis.yml b/.travis.yml
index fd41841c77..655fab9d8e 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -23,6 +23,9 @@ branches:
     - develop
     - /^release-v/
 
+# When running the tox environments that call Twisted Trial, we can pass the -j
+# flag to run the tests concurrently. We set this to 2 for CPU-bound tests
+# (SQLite) and 4 for I/O-bound tests (PostgreSQL).
 matrix:
   fast_finish: true
   include:
@@ -33,10 +36,10 @@ matrix:
     env: TOX_ENV="pep8,check_isort"
 
   - python: 2.7
-    env: TOX_ENV=py27
+    env: TOX_ENV=py27 TRIAL_FLAGS="-j 2"
 
   - python: 2.7
-    env: TOX_ENV=py27-old
+    env: TOX_ENV=py27-old TRIAL_FLAGS="-j 2"
 
   - python: 2.7
     env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"
@@ -44,10 +47,10 @@ matrix:
       - postgresql
 
   - python: 3.5
-    env: TOX_ENV=py35
+    env: TOX_ENV=py35 TRIAL_FLAGS="-j 2"
 
   - python: 3.6
-    env: TOX_ENV=py36
+    env: TOX_ENV=py36 TRIAL_FLAGS="-j 2"
 
   - python: 3.6
     env: TOX_ENV=py36-postgres TRIAL_FLAGS="-j 4"
diff --git a/changelog.d/4006.misc b/changelog.d/4006.misc
new file mode 100644
index 0000000000..35ffa1c2d2
--- /dev/null
+++ b/changelog.d/4006.misc
@@ -0,0 +1 @@
+Delete unreferenced state groups during history purge
diff --git a/changelog.d/4091.feature b/changelog.d/4091.feature
new file mode 100644
index 0000000000..a3f7dbdcdd
--- /dev/null
+++ b/changelog.d/4091.feature
@@ -0,0 +1 @@
+Support for replacing rooms with new ones
diff --git a/changelog.d/4095.bugfix b/changelog.d/4095.bugfix
new file mode 100644
index 0000000000..76ee7148c2
--- /dev/null
+++ b/changelog.d/4095.bugfix
@@ -0,0 +1 @@
+Fix exceptions when using the email mailer on Python 3.
diff --git a/changelog.d/4099.feature b/changelog.d/4099.feature
new file mode 100644
index 0000000000..a3f7dbdcdd
--- /dev/null
+++ b/changelog.d/4099.feature
@@ -0,0 +1 @@
+Support for replacing rooms with new ones
diff --git a/changelog.d/4100.feature b/changelog.d/4100.feature
new file mode 100644
index 0000000000..a3f7dbdcdd
--- /dev/null
+++ b/changelog.d/4100.feature
@@ -0,0 +1 @@
+Support for replacing rooms with new ones
diff --git a/changelog.d/4101.feature b/changelog.d/4101.feature
new file mode 100644
index 0000000000..a3f7dbdcdd
--- /dev/null
+++ b/changelog.d/4101.feature
@@ -0,0 +1 @@
+Support for replacing rooms with new ones
diff --git a/changelog.d/4106.removal b/changelog.d/4106.removal
new file mode 100644
index 0000000000..7e63208daa
--- /dev/null
+++ b/changelog.d/4106.removal
@@ -0,0 +1 @@
+The disused and un-specced identicon generator has been removed.
diff --git a/changelog.d/4108.misc b/changelog.d/4108.misc
new file mode 100644
index 0000000000..85810c3d83
--- /dev/null
+++ b/changelog.d/4108.misc
@@ -0,0 +1 @@
+The "Received rdata" log messages on workers is now logged at DEBUG, not INFO.
diff --git a/changelog.d/4109.misc b/changelog.d/4109.misc
new file mode 100644
index 0000000000..566c683119
--- /dev/null
+++ b/changelog.d/4109.misc
@@ -0,0 +1 @@
+Reduce replication traffic for device lists
diff --git a/changelog.d/4110.misc b/changelog.d/4110.misc
new file mode 100644
index 0000000000..a50327ae34
--- /dev/null
+++ b/changelog.d/4110.misc
@@ -0,0 +1 @@
+Fix `synapse_replication_tcp_protocol_*_commands` metric label to be full command name, rather than just the first character
diff --git a/changelog.d/4118.removal b/changelog.d/4118.removal
new file mode 100644
index 0000000000..6fb1d67b47
--- /dev/null
+++ b/changelog.d/4118.removal
@@ -0,0 +1 @@
+The obsolete and non-functional /pull federation endpoint has been removed.
diff --git a/changelog.d/4119.removal b/changelog.d/4119.removal
new file mode 100644
index 0000000000..81383ece6b
--- /dev/null
+++ b/changelog.d/4119.removal
@@ -0,0 +1 @@
+The deprecated v1 key exchange endpoints have been removed.
diff --git a/changelog.d/4120.removal b/changelog.d/4120.removal
new file mode 100644
index 0000000000..a7a567098f
--- /dev/null
+++ b/changelog.d/4120.removal
@@ -0,0 +1 @@
+Synapse will no longer fetch keys using the fallback deprecated v1 key exchange method and will now always use v2.
diff --git a/changelog.d/4121.misc b/changelog.d/4121.misc
new file mode 100644
index 0000000000..9c29d80c3f
--- /dev/null
+++ b/changelog.d/4121.misc
@@ -0,0 +1 @@
+Log some bits about room creation
diff --git a/changelog.d/4122.bugfix b/changelog.d/4122.bugfix
new file mode 100644
index 0000000000..66dcfb18b9
--- /dev/null
+++ b/changelog.d/4122.bugfix
@@ -0,0 +1 @@
+Searches that request profile info no longer fail with a 500.
diff --git a/changelog.d/4124.misc b/changelog.d/4124.misc
new file mode 100644
index 0000000000..28f438b9b2
--- /dev/null
+++ b/changelog.d/4124.misc
@@ -0,0 +1 @@
+Fix `tox` failure on old systems
diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml
index 3a8dfbae34..b1f6fcb7da 100644
--- a/contrib/docker/docker-compose.yml
+++ b/contrib/docker/docker-compose.yml
@@ -47,4 +47,4 @@ services:
       # You may store the database tables in a local folder..
       - ./schemas:/var/lib/postgresql/data
       # .. or store them on some high performance storage for better results
-      # - /path/to/ssd/storage:/var/lib/postfesql/data
+      # - /path/to/ssd/storage:/var/lib/postgresql/data
diff --git a/jenkins/prepare_synapse.sh b/jenkins/prepare_synapse.sh
index d95ca846c4..016afb8baa 100755
--- a/jenkins/prepare_synapse.sh
+++ b/jenkins/prepare_synapse.sh
@@ -14,22 +14,3 @@ fi
 
 # set up the virtualenv
 tox -e py27 --notest -v
-
-TOX_BIN=$TOX_DIR/py27/bin
-
-# cryptography 2.2 requires setuptools >= 18.5.
-#
-# older versions of virtualenv (?) give us a virtualenv with the same version
-# of setuptools as is installed on the system python (and tox runs virtualenv
-# under python3, so we get the version of setuptools that is installed on that).
-#
-# anyway, make sure that we have a recent enough setuptools.
-$TOX_BIN/pip install 'setuptools>=18.5'
-
-# we also need a semi-recent version of pip, because old ones fail to install
-# the "enum34" dependency of cryptography.
-$TOX_BIN/pip install 'pip>=10'
-
-{ python synapse/python_dependencies.py
-  echo lxml
-} | xargs $TOX_BIN/pip install
diff --git a/scripts-dev/make_identicons.pl b/scripts-dev/make_identicons.pl
deleted file mode 100755
index cbff63e298..0000000000
--- a/scripts-dev/make_identicons.pl
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env perl
-
-use strict;
-use warnings;
-
-use DBI;
-use DBD::SQLite;
-use JSON;
-use Getopt::Long;
-
-my $db; # = "homeserver.db";
-my $server = "http://localhost:8008";
-my $size = 320;
-
-GetOptions("db|d=s",     \$db,
-           "server|s=s", \$server,
-           "width|w=i",  \$size) or usage();
-
-usage() unless $db;
-
-my $dbh = DBI->connect("dbi:SQLite:dbname=$db","","") || die $DBI::errstr;
-
-my $res = $dbh->selectall_arrayref("select token, name from access_tokens, users where access_tokens.user_id = users.id group by user_id") || die $DBI::errstr;
-
-foreach (@$res) {
-    my ($token, $mxid) = ($_->[0], $_->[1]);
-    my ($user_id) = ($mxid =~ m/@(.*):/);
-    my ($url) = $dbh->selectrow_array("select avatar_url from profiles where user_id=?", undef, $user_id);
-    if (!$url || $url =~ /#auto$/) {
-        `curl -s -o tmp.png "$server/_matrix/media/v1/identicon?name=${mxid}&width=$size&height=$size"`;
-        my $json = `curl -s -X POST -H "Content-Type: image/png" -T "tmp.png" $server/_matrix/media/v1/upload?access_token=$token`;
-        my $content_uri = from_json($json)->{content_uri};
-        `curl -X PUT -H "Content-Type: application/json" --data '{ "avatar_url": "${content_uri}#auto"}' $server/_matrix/client/api/v1/profile/${mxid}/avatar_url?access_token=$token`;
-    }
-}
-
-sub usage {
-    die "usage: ./make-identicons.pl\n\t-d database [e.g. homeserver.db]\n\t-s homeserver (default: http://localhost:8008)\n\t-w identicon size in pixels (default 320)";
-}
\ No newline at end of file
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index c2630c4c64..5565e516d6 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -61,6 +61,7 @@ class LoginType(object):
 class EventTypes(object):
     Member = "m.room.member"
     Create = "m.room.create"
+    Tombstone = "m.room.tombstone"
     JoinRules = "m.room.join_rules"
     PowerLevels = "m.room.power_levels"
     Aliases = "m.room.aliases"
diff --git a/synapse/api/urls.py b/synapse/api/urls.py
index 6d9f1ca0ef..f78695b657 100644
--- a/synapse/api/urls.py
+++ b/synapse/api/urls.py
@@ -28,7 +28,6 @@ FEDERATION_PREFIX = "/_matrix/federation/v1"
 STATIC_PREFIX = "/_matrix/static"
 WEB_CLIENT_PREFIX = "/_matrix/client"
 CONTENT_REPO_PREFIX = "/_matrix/content"
-SERVER_KEY_PREFIX = "/_matrix/key/v1"
 SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
 MEDIA_PREFIX = "/_matrix/media/r0"
 LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 593e1e75db..415374a2ce 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -37,7 +37,6 @@ from synapse.api.urls import (
     FEDERATION_PREFIX,
     LEGACY_MEDIA_PREFIX,
     MEDIA_PREFIX,
-    SERVER_KEY_PREFIX,
     SERVER_KEY_V2_PREFIX,
     STATIC_PREFIX,
     WEB_CLIENT_PREFIX,
@@ -59,7 +58,6 @@ from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, check_requirem
 from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
 from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
 from synapse.rest import ClientRestResource
-from synapse.rest.key.v1.server_key_resource import LocalKey
 from synapse.rest.key.v2 import KeyApiV2Resource
 from synapse.rest.media.v0.content_repository import ContentRepoResource
 from synapse.server import HomeServer
@@ -236,10 +234,7 @@ class SynapseHomeServer(HomeServer):
                 )
 
         if name in ["keys", "federation"]:
-            resources.update({
-                SERVER_KEY_PREFIX: LocalKey(self),
-                SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self),
-            })
+            resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
 
         if name == "webclient":
             resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self)
diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py
index 080c81f14b..d40e4b8591 100644
--- a/synapse/crypto/keyclient.py
+++ b/synapse/crypto/keyclient.py
@@ -15,6 +15,8 @@
 
 import logging
 
+from six.moves import urllib
+
 from canonicaljson import json
 
 from twisted.internet import defer, reactor
@@ -28,15 +30,15 @@ from synapse.util import logcontext
 
 logger = logging.getLogger(__name__)
 
-KEY_API_V1 = b"/_matrix/key/v1/"
+KEY_API_V2 = "/_matrix/key/v2/server/%s"
 
 
 @defer.inlineCallbacks
-def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1):
+def fetch_server_key(server_name, tls_client_options_factory, key_id):
     """Fetch the keys for a remote server."""
 
     factory = SynapseKeyClientFactory()
-    factory.path = path
+    factory.path = KEY_API_V2 % (urllib.parse.quote(key_id), )
     factory.host = server_name
     endpoint = matrix_federation_endpoint(
         reactor, server_name, tls_client_options_factory, timeout=30
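
A hedged usage sketch of the new signature: callers now pass the raw key id, and the v2 request path is built (and percent-quoted) inside fetch_server_key. The server name and key id below are illustrative.

    from twisted.internet import defer

    from synapse.crypto.keyclient import fetch_server_key

    @defer.inlineCallbacks
    def fetch_key_example(hs):
        # illustrative server name and key id; quoting of the key id
        # happens inside fetch_server_key
        response, tls_certificate = yield fetch_server_key(
            "remote.example.com",
            hs.tls_client_options_factory,
            "ed25519:a_key_id",
        )
        defer.returnValue((response, tls_certificate))
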
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index d89f94c219..515ebbc148 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2017 New Vector Ltd.
+# Copyright 2017, 2018 New Vector Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,6 @@ import hashlib
 import logging
 from collections import namedtuple
 
-from six.moves import urllib
-
 from signedjson.key import (
     decode_verify_key_bytes,
     encode_verify_key_base64,
@@ -395,32 +393,13 @@ class Keyring(object):
 
     @defer.inlineCallbacks
     def get_keys_from_server(self, server_name_and_key_ids):
-        @defer.inlineCallbacks
-        def get_key(server_name, key_ids):
-            keys = None
-            try:
-                keys = yield self.get_server_verify_key_v2_direct(
-                    server_name, key_ids
-                )
-            except Exception as e:
-                logger.info(
-                    "Unable to get key %r for %r directly: %s %s",
-                    key_ids, server_name,
-                    type(e).__name__, str(e),
-                )
-
-            if not keys:
-                keys = yield self.get_server_verify_key_v1_direct(
-                    server_name, key_ids
-                )
-
-                keys = {server_name: keys}
-
-            defer.returnValue(keys)
-
         results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
             [
-                run_in_background(get_key, server_name, key_ids)
+                run_in_background(
+                    self.get_server_verify_key_v2_direct,
+                    server_name,
+                    key_ids,
+                )
                 for server_name, key_ids in server_name_and_key_ids
             ],
             consumeErrors=True,
@@ -525,10 +504,7 @@ class Keyring(object):
                 continue
 
             (response, tls_certificate) = yield fetch_server_key(
-                server_name, self.hs.tls_client_options_factory,
-                path=("/_matrix/key/v2/server/%s" % (
-                    urllib.parse.quote(requested_key_id),
-                )).encode("ascii"),
+                server_name, self.hs.tls_client_options_factory, requested_key_id
             )
 
             if (u"signatures" not in response
@@ -657,78 +633,6 @@ class Keyring(object):
 
         defer.returnValue(results)
 
-    @defer.inlineCallbacks
-    def get_server_verify_key_v1_direct(self, server_name, key_ids):
-        """Finds a verification key for the server with one of the key ids.
-        Args:
-            server_name (str): The name of the server to fetch a key for.
-            keys_ids (list of str): The key_ids to check for.
-        """
-
-        # Try to fetch the key from the remote server.
-
-        (response, tls_certificate) = yield fetch_server_key(
-            server_name, self.hs.tls_client_options_factory
-        )
-
-        # Check the response.
-
-        x509_certificate_bytes = crypto.dump_certificate(
-            crypto.FILETYPE_ASN1, tls_certificate
-        )
-
-        if ("signatures" not in response
-                or server_name not in response["signatures"]):
-            raise KeyLookupError("Key response not signed by remote server")
-
-        if "tls_certificate" not in response:
-            raise KeyLookupError("Key response missing TLS certificate")
-
-        tls_certificate_b64 = response["tls_certificate"]
-
-        if encode_base64(x509_certificate_bytes) != tls_certificate_b64:
-            raise KeyLookupError("TLS certificate doesn't match")
-
-        # Cache the result in the datastore.
-
-        time_now_ms = self.clock.time_msec()
-
-        verify_keys = {}
-        for key_id, key_base64 in response["verify_keys"].items():
-            if is_signing_algorithm_supported(key_id):
-                key_bytes = decode_base64(key_base64)
-                verify_key = decode_verify_key_bytes(key_id, key_bytes)
-                verify_key.time_added = time_now_ms
-                verify_keys[key_id] = verify_key
-
-        for key_id in response["signatures"][server_name]:
-            if key_id not in response["verify_keys"]:
-                raise KeyLookupError(
-                    "Key response must include verification keys for all"
-                    " signatures"
-                )
-            if key_id in verify_keys:
-                verify_signed_json(
-                    response,
-                    server_name,
-                    verify_keys[key_id]
-                )
-
-        yield self.store.store_server_certificate(
-            server_name,
-            server_name,
-            time_now_ms,
-            tls_certificate,
-        )
-
-        yield self.store_keys(
-            server_name=server_name,
-            from_server=server_name,
-            verify_keys=verify_keys,
-        )
-
-        defer.returnValue(verify_keys)
-
     def store_keys(self, server_name, from_server, verify_keys):
         """Store a collection of verify keys for a given server
         Args:
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 0f9302a6a8..fa2cc550e2 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -324,11 +324,6 @@ class FederationServer(FederationBase):
             defer.returnValue((404, ""))
 
     @defer.inlineCallbacks
-    @log_function
-    def on_pull_request(self, origin, versions):
-        raise NotImplementedError("Pull transactions not implemented")
-
-    @defer.inlineCallbacks
     def on_query_request(self, query_type, args):
         received_queries_counter.labels(query_type).inc()
         resp = yield self.registry.on_query(query_type, args)
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 7288d49074..3553f418f1 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -362,14 +362,6 @@ class FederationSendServlet(BaseFederationServlet):
         defer.returnValue((code, response))
 
 
-class FederationPullServlet(BaseFederationServlet):
-    PATH = "/pull/"
-
-    # This is for when someone asks us for everything since version X
-    def on_GET(self, origin, content, query):
-        return self.handler.on_pull_request(query["origin"][0], query["v"])
-
-
 class FederationEventServlet(BaseFederationServlet):
     PATH = "/event/(?P<event_id>[^/]*)/"
 
@@ -1261,7 +1253,6 @@ class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet):
 
 FEDERATION_SERVLET_CLASSES = (
     FederationSendServlet,
-    FederationPullServlet,
     FederationEventServlet,
     FederationStateServlet,
     FederationStateIdsServlet,
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 7d67bf803a..0699731c13 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -138,9 +138,30 @@ class DirectoryHandler(BaseHandler):
             )
 
     @defer.inlineCallbacks
-    def delete_association(self, requester, room_alias):
-        # association deletion for human users
+    def delete_association(self, requester, room_alias, send_event=True):
+        """Remove an alias from the directory
 
+        (this is only meant for human users; AS users should call
+        delete_appservice_association)
+
+        Args:
+            requester (Requester):
+            room_alias (RoomAlias):
+            send_event (bool): Whether to send an updated m.room.aliases event.
+                Note that, if we delete the canonical alias, we will always attempt
+                to send an m.room.canonical_alias event.
+
+        Returns:
+            Deferred[unicode]: room id that the alias used to point to
+
+        Raises:
+            NotFoundError: if the alias doesn't exist
+
+            AuthError: if the user doesn't have perms to delete the alias (ie, the user
+                is neither the creator of the alias, nor a server admin).
+
+            SynapseError: if the alias belongs to an AS
+        """
         user_id = requester.user.to_string()
 
         try:
@@ -168,10 +189,11 @@ class DirectoryHandler(BaseHandler):
         room_id = yield self._delete_association(room_alias)
 
         try:
-            yield self.send_room_alias_update_event(
-                requester,
-                room_id
-            )
+            if send_event:
+                yield self.send_room_alias_update_event(
+                    requester,
+                    room_id
+                )
 
             yield self._update_canonical_alias(
                 requester,
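
A hedged sketch of the new send_event parameter in use, mirroring how _move_aliases_to_new_room (added to synapse/handlers/room.py below) suppresses the per-alias m.room.aliases update so several deletions can be batched into one event; the alias is illustrative.

    from twisted.internet import defer

    from synapse.types import RoomAlias

    @defer.inlineCallbacks
    def remove_alias_quietly(directory_handler, requester):
        # send_event=False skips the m.room.aliases update for this
        # deletion; the caller sends a single update afterwards
        room_id = yield directory_handler.delete_association(
            requester,
            RoomAlias.from_string("#old-alias:example.com"),  # illustrative
            send_event=False,
        )
        defer.returnValue(room_id)
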
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 969e588e73..a7cd779b02 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -427,6 +427,9 @@ class EventCreationHandler(object):
 
         if event.is_state():
             prev_state = yield self.deduplicate_state_event(event, context)
             if prev_state is not None:
+                logger.info(
+                    "Not bothering to persist duplicate state event %s", event.event_id,
+                )
                 defer.returnValue(prev_state)
 
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index e9d7b25a36..d2beb275cf 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -50,7 +50,6 @@ class RegistrationHandler(BaseHandler):
         self._auth_handler = hs.get_auth_handler()
         self.profile_handler = hs.get_profile_handler()
         self.user_directory_handler = hs.get_user_directory_handler()
-        self.room_creation_handler = self.hs.get_room_creation_handler()
         self.captcha_client = CaptchaServerHttpClient(hs)
 
         self._next_generated_user_id = None
@@ -241,7 +240,10 @@ class RegistrationHandler(BaseHandler):
                     else:
                         # create room expects the localpart of the room alias
                         room_alias_localpart = room_alias.localpart
-                        yield self.room_creation_handler.create_room(
+
+                        # getting the RoomCreationHandler during init gives a dependency
+                        # loop
+                        yield self.hs.get_room_creation_handler().create_room(
                             fake_requester,
                             config={
                                 "preset": "public_chat",
@@ -254,9 +256,6 @@ class RegistrationHandler(BaseHandler):
             except Exception as e:
                 logger.error("Failed to join new user to %r: %r", r, e)
 
-        # We used to generate default identicons here, but nowadays
-        # we want clients to generate their own as part of their branding
-        # rather than there being consistent matrix-wide ones, so we don't.
         defer.returnValue((user_id, token))
 
     @defer.inlineCallbacks
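
To illustrate the "dependency loop" comment above, a minimal, purely illustrative sketch (none of these classes are Synapse's real implementations): if two lazily-built handlers each looked the other up eagerly in __init__, construction would recurse; deferring the lookup to call time breaks the cycle.

    class FakeHomeServer(object):
        """Illustrative stand-in for hs: builds handlers on first use."""

        def __init__(self):
            self._handlers = {}

        def get_handler(self, cls):
            # the cache is only populated *after* __init__ returns, so
            # eager cross-lookups in two __init__s would recurse forever
            if cls not in self._handlers:
                self._handlers[cls] = cls(self)
            return self._handlers[cls]

    class FakeRegistrationHandler(object):
        def __init__(self, hs):
            self.hs = hs  # do NOT resolve collaborators here

        def create_default_room(self):
            # lazy lookup at call time avoids the init-time cycle
            return self.hs.get_handler(FakeRoomCreationHandler)

    class FakeRoomCreationHandler(object):
        def __init__(self, hs):
            self.hs = hs
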
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 3ba92bdb4c..3928faa6e7 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -21,7 +21,7 @@ import math
 import string
 from collections import OrderedDict
 
-from six import string_types
+from six import iteritems, string_types
 
 from twisted.internet import defer
 
@@ -32,10 +32,11 @@ from synapse.api.constants import (
     JoinRules,
     RoomCreationPreset,
 )
-from synapse.api.errors import AuthError, Codes, StoreError, SynapseError
+from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError
 from synapse.storage.state import StateFilter
 from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID
 from synapse.util import stringutils
+from synapse.util.async_helpers import Linearizer
 from synapse.visibility import filter_events_for_client
 
 from ._base import BaseHandler
@@ -73,6 +74,334 @@ class RoomCreationHandler(BaseHandler):
 
         self.spam_checker = hs.get_spam_checker()
         self.event_creation_handler = hs.get_event_creation_handler()
+        self.room_member_handler = hs.get_room_member_handler()
+
+        # linearizer to stop two upgrades happening at once
+        self._upgrade_linearizer = Linearizer("room_upgrade_linearizer")
+
+    @defer.inlineCallbacks
+    def upgrade_room(self, requester, old_room_id, new_version):
+        """Replace a room with a new room with a different version
+
+        Args:
+            requester (synapse.types.Requester): the user requesting the upgrade
+            old_room_id (unicode): the id of the room to be replaced
+            new_version (unicode): the new room version to use
+
+        Returns:
+            Deferred[unicode]: the new room id
+        """
+        yield self.ratelimit(requester)
+
+        user_id = requester.user.to_string()
+
+        with (yield self._upgrade_linearizer.queue(old_room_id)):
+            # start by allocating a new room id
+            r = yield self.store.get_room(old_room_id)
+            if r is None:
+                raise NotFoundError("Unknown room id %s" % (old_room_id,))
+            new_room_id = yield self._generate_room_id(
+                creator_id=user_id, is_public=r["is_public"],
+            )
+
+            logger.info("Creating new room %s to replace %s", new_room_id, old_room_id)
+
+            # we create and auth the tombstone event before properly creating the new
+            # room, to check our user has perms in the old room.
+            tombstone_event, tombstone_context = (
+                yield self.event_creation_handler.create_event(
+                    requester, {
+                        "type": EventTypes.Tombstone,
+                        "state_key": "",
+                        "room_id": old_room_id,
+                        "sender": user_id,
+                        "content": {
+                            "body": "This room has been replaced",
+                            "replacement_room": new_room_id,
+                        }
+                    },
+                    token_id=requester.access_token_id,
+                )
+            )
+            yield self.auth.check_from_context(tombstone_event, tombstone_context)
+
+            yield self.clone_existing_room(
+                requester,
+                old_room_id=old_room_id,
+                new_room_id=new_room_id,
+                new_room_version=new_version,
+                tombstone_event_id=tombstone_event.event_id,
+            )
+
+            # now send the tombstone
+            yield self.event_creation_handler.send_nonmember_event(
+                requester, tombstone_event, tombstone_context,
+            )
+
+            old_room_state = yield tombstone_context.get_current_state_ids(self.store)
+
+            # update any aliases
+            yield self._move_aliases_to_new_room(
+                requester, old_room_id, new_room_id, old_room_state,
+            )
+
+            # and finally, shut down the PLs in the old room, and update them in the new
+            # room.
+            yield self._update_upgraded_room_pls(
+                requester, old_room_id, new_room_id, old_room_state,
+            )
+
+            defer.returnValue(new_room_id)
+
+    @defer.inlineCallbacks
+    def _update_upgraded_room_pls(
+            self, requester, old_room_id, new_room_id, old_room_state,
+    ):
+        """Send updated power levels in both rooms after an upgrade
+
+        Args:
+            requester (synapse.types.Requester): the user requesting the upgrade
+            old_room_id (unicode): the id of the room to be replaced
+            new_room_id (unicode): the id of the replacement room
+            old_room_state (dict[tuple[str, str], str]): the state map for the old room
+
+        Returns:
+            Deferred
+        """
+        old_room_pl_event_id = old_room_state.get((EventTypes.PowerLevels, ""))
+
+        if old_room_pl_event_id is None:
+            logger.warning(
+                "Not supported: upgrading a room with no PL event. Not setting PLs "
+                "in old room.",
+            )
+            return
+
+        old_room_pl_state = yield self.store.get_event(old_room_pl_event_id)
+
+        # we try to stop regular users from speaking by setting the PL required
+        # to send regular events and invites to 'Moderator' level. That's normally
+        # 50, but if the default PL in a room is 50 or more, then we set the
+        # required PL above that.
+
+        pl_content = dict(old_room_pl_state.content)
+        users_default = int(pl_content.get("users_default", 0))
+        restricted_level = max(users_default + 1, 50)
+
+        updated = False
+        for v in ("invite", "events_default"):
+            current = int(pl_content.get(v, 0))
+            if current < restricted_level:
+                logger.info(
+                    "Setting level for %s in %s to %i (was %i)",
+                    v, old_room_id, restricted_level, current,
+                )
+                pl_content[v] = restricted_level
+                updated = True
+            else:
+                logger.info(
+                    "Not setting level for %s (already %i)",
+                    v, current,
+                )
+
+        if updated:
+            try:
+                yield self.event_creation_handler.create_and_send_nonmember_event(
+                    requester, {
+                        "type": EventTypes.PowerLevels,
+                        "state_key": '',
+                        "room_id": old_room_id,
+                        "sender": requester.user.to_string(),
+                        "content": pl_content,
+                    }, ratelimit=False,
+                )
+            except AuthError as e:
+                logger.warning("Unable to update PLs in old room: %s", e)
+
+        logger.info("Setting correct PLs in new room")
+        yield self.event_creation_handler.create_and_send_nonmember_event(
+            requester, {
+                "type": EventTypes.PowerLevels,
+                "state_key": '',
+                "room_id": new_room_id,
+                "sender": requester.user.to_string(),
+                "content": old_room_pl_state.content,
+            }, ratelimit=False,
+        )
+
+    @defer.inlineCallbacks
+    def clone_existing_room(
+            self, requester, old_room_id, new_room_id, new_room_version,
+            tombstone_event_id,
+    ):
+        """Populate a new room based on an old room
+
+        Args:
+            requester (synapse.types.Requester): the user requesting the upgrade
+            old_room_id (unicode): the id of the room to be replaced
+            new_room_id (unicode): the id to give the new room (should already have been
+                created with _generate_room_id())
+            new_room_version (unicode): the new room version to use
+            tombstone_event_id (unicode|str): the ID of the tombstone event in the old
+                room.
+        Returns:
+            Deferred[None]
+        """
+        user_id = requester.user.to_string()
+
+        if not self.spam_checker.user_may_create_room(user_id):
+            raise SynapseError(403, "You are not permitted to create rooms")
+
+        creation_content = {
+            "room_version": new_room_version,
+            "predecessor": {
+                "room_id": old_room_id,
+                "event_id": tombstone_event_id,
+            }
+        }
+
+        initial_state = dict()
+
+        types_to_copy = (
+            (EventTypes.JoinRules, ""),
+            (EventTypes.Name, ""),
+            (EventTypes.Topic, ""),
+            (EventTypes.RoomHistoryVisibility, ""),
+            (EventTypes.GuestAccess, ""),
+            (EventTypes.RoomAvatar, ""),
+        )
+
+        old_room_state_ids = yield self.store.get_filtered_current_state_ids(
+            old_room_id, StateFilter.from_types(types_to_copy),
+        )
+        # map from event_id to BaseEvent
+        old_room_state_events = yield self.store.get_events(old_room_state_ids.values())
+
+        for k, old_event_id in iteritems(old_room_state_ids):
+            old_event = old_room_state_events.get(old_event_id)
+            if old_event:
+                initial_state[k] = old_event.content
+
+        yield self._send_events_for_new_room(
+            requester,
+            new_room_id,
+
+            # we expect to override all the presets with initial_state, so this is
+            # somewhat arbitrary.
+            preset_config=RoomCreationPreset.PRIVATE_CHAT,
+
+            invite_list=[],
+            initial_state=initial_state,
+            creation_content=creation_content,
+        )
+
+        # XXX invites/joins
+        # XXX 3pid invites
+
+    @defer.inlineCallbacks
+    def _move_aliases_to_new_room(
+            self, requester, old_room_id, new_room_id, old_room_state,
+    ):
+        directory_handler = self.hs.get_handlers().directory_handler
+
+        aliases = yield self.store.get_aliases_for_room(old_room_id)
+
+        # check to see if we have a canonical alias.
+        canonical_alias = None
+        canonical_alias_event_id = old_room_state.get((EventTypes.CanonicalAlias, ""))
+        if canonical_alias_event_id:
+            canonical_alias_event = yield self.store.get_event(canonical_alias_event_id)
+            if canonical_alias_event:
+                canonical_alias = canonical_alias_event.content.get("alias", "")
+
+        # first we try to remove the aliases from the old room (we suppress sending
+        # the room_aliases event until the end).
+        #
+        # Note that we'll only be able to remove aliases that (a) aren't owned by an AS,
+        # and (b) the user created (unless the user is a server admin).
+        #
+        # This is probably correct - given we don't allow such aliases to be deleted
+        # normally, it would be odd to allow it in the case of doing a room upgrade -
+        # but it makes the upgrade less effective, and you have to wonder why a room
+        # admin can't remove aliases that point to that room anyway.
+        # (cf https://github.com/matrix-org/synapse/issues/2360)
+        #
+        removed_aliases = []
+        for alias_str in aliases:
+            alias = RoomAlias.from_string(alias_str)
+            try:
+                yield directory_handler.delete_association(
+                    requester, alias, send_event=False,
+                )
+                removed_aliases.append(alias_str)
+            except SynapseError as e:
+                logger.warning(
+                    "Unable to remove alias %s from old room: %s",
+                    alias, e,
+                )
+
+        # if we didn't find any aliases, or couldn't remove any of them, we can skip
+        # the rest of this.
+        if not removed_aliases:
+            return
+
+        try:
+            # this can fail if, for some reason, our user doesn't have perms to send
+            # m.room.aliases events in the old room (note that we've already checked that
+            # they have perms to send a tombstone event, so that's not terribly likely).
+            #
+            # If that happens, it's regrettable, but we should carry on: it's the same
+            # as when you remove an alias from the directory normally - it just means that
+            # the aliases event gets out of sync with the directory
+            # (cf https://github.com/vector-im/riot-web/issues/2369)
+            yield directory_handler.send_room_alias_update_event(
+                requester, old_room_id,
+            )
+        except AuthError as e:
+            logger.warning(
+                "Failed to send updated alias event on old room: %s", e,
+            )
+
+        # we can now add any aliases we successfully removed to the new room.
+        for alias in removed_aliases:
+            try:
+                yield directory_handler.create_association(
+                    requester, RoomAlias.from_string(alias),
+                    new_room_id, servers=(self.hs.hostname, ),
+                    send_event=False,
+                )
+                logger.info("Moved alias %s to new room", alias)
+            except SynapseError as e:
+                # I'm not really expecting this to happen, but it could if the spam
+                # checking module decides the alias shouldn't be allowed, or similar.
+                logger.error(
+                    "Error adding alias %s to new room: %s",
+                    alias, e,
+                )
+
+        try:
+            if canonical_alias and (canonical_alias in removed_aliases):
+                yield self.event_creation_handler.create_and_send_nonmember_event(
+                    requester,
+                    {
+                        "type": EventTypes.CanonicalAlias,
+                        "state_key": "",
+                        "room_id": new_room_id,
+                        "sender": requester.user.to_string(),
+                        "content": {"alias": canonical_alias, },
+                    },
+                    ratelimit=False
+                )
+
+            yield directory_handler.send_room_alias_update_event(
+                requester, new_room_id,
+            )
+        except SynapseError as e:
+            # again I'm not really expecting this to fail, but if it does, I'd rather
+            # we returned the new room to the client at this point.
+            logger.error(
+                "Unable to send updated alias events in new room: %s", e,
+            )
 
     @defer.inlineCallbacks
     def create_room(self, requester, config, ratelimit=True,
@@ -165,28 +494,7 @@ class RoomCreationHandler(BaseHandler):
         visibility = config.get("visibility", None)
         is_public = visibility == "public"
 
-        # autogen room IDs and try to create it. We may clash, so just
-        # try a few times till one goes through, giving up eventually.
-        attempts = 0
-        room_id = None
-        while attempts < 5:
-            try:
-                random_string = stringutils.random_string(18)
-                gen_room_id = RoomID(
-                    random_string,
-                    self.hs.hostname,
-                )
-                yield self.store.store_room(
-                    room_id=gen_room_id.to_string(),
-                    room_creator_user_id=user_id,
-                    is_public=is_public
-                )
-                room_id = gen_room_id.to_string()
-                break
-            except StoreError:
-                attempts += 1
-        if not room_id:
-            raise StoreError(500, "Couldn't generate a room ID.")
+        room_id = yield self._generate_room_id(creator_id=user_id, is_public=is_public)
 
         if room_alias:
             directory_handler = self.hs.get_handlers().directory_handler
@@ -216,18 +524,15 @@ class RoomCreationHandler(BaseHandler):
         # override any attempt to set room versions via the creation_content
         creation_content["room_version"] = room_version
 
-        room_member_handler = self.hs.get_room_member_handler()
-
         yield self._send_events_for_new_room(
             requester,
             room_id,
-            room_member_handler,
             preset_config=preset_config,
             invite_list=invite_list,
             initial_state=initial_state,
             creation_content=creation_content,
             room_alias=room_alias,
-            power_level_content_override=config.get("power_level_content_override", {}),
+            power_level_content_override=config.get("power_level_content_override"),
             creator_join_profile=creator_join_profile,
         )
 
@@ -263,7 +568,7 @@ class RoomCreationHandler(BaseHandler):
             if is_direct:
                 content["is_direct"] = is_direct
 
-            yield room_member_handler.update_membership(
+            yield self.room_member_handler.update_membership(
                 requester,
                 UserID.from_string(invitee),
                 room_id,
@@ -301,14 +606,13 @@ class RoomCreationHandler(BaseHandler):
             self,
             creator,  # A Requester object.
             room_id,
-            room_member_handler,
             preset_config,
             invite_list,
             initial_state,
             creation_content,
-            room_alias,
-            power_level_content_override,
-            creator_join_profile,
+            room_alias=None,
+            power_level_content_override=None,
+            creator_join_profile=None,
     ):
         def create(etype, content, **kwargs):
             e = {
@@ -324,6 +628,7 @@ class RoomCreationHandler(BaseHandler):
         @defer.inlineCallbacks
         def send(etype, content, **kwargs):
             event = create(etype, content, **kwargs)
+            logger.info("Sending %s in new room", etype)
             yield self.event_creation_handler.create_and_send_nonmember_event(
                 creator,
                 event,
@@ -346,7 +651,8 @@ class RoomCreationHandler(BaseHandler):
             content=creation_content,
         )
 
-        yield room_member_handler.update_membership(
+        logger.info("Sending %s in new room", EventTypes.Member)
+        yield self.room_member_handler.update_membership(
             creator,
             creator.user,
             room_id,
@@ -388,7 +694,8 @@ class RoomCreationHandler(BaseHandler):
                 for invitee in invite_list:
                     power_level_content["users"][invitee] = 100
 
-            power_level_content.update(power_level_content_override)
+            if power_level_content_override:
+                power_level_content.update(power_level_content_override)
 
             yield send(
                 etype=EventTypes.PowerLevels,
@@ -427,6 +734,30 @@ class RoomCreationHandler(BaseHandler):
                 content=content,
             )
 
+    @defer.inlineCallbacks
+    def _generate_room_id(self, creator_id, is_public):
+        # autogen a room ID and try to create it. We may clash, so just
+        # try a few times till one goes through, giving up eventually.
+        attempts = 0
+        while attempts < 5:
+            try:
+                random_string = stringutils.random_string(18)
+                gen_room_id = RoomID(
+                    random_string,
+                    self.hs.hostname,
+                ).to_string()
+                if isinstance(gen_room_id, bytes):
+                    gen_room_id = gen_room_id.decode('utf-8')
+                yield self.store.store_room(
+                    room_id=gen_room_id,
+                    room_creator_user_id=creator_id,
+                    is_public=is_public,
+                )
+                defer.returnValue(gen_room_id)
+            except StoreError:
+                attempts += 1
+        raise StoreError(500, "Couldn't generate a room ID.")
+
 
 class RoomContextHandler(object):
     def __init__(self, hs):
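
A hedged sketch of driving the new upgrade path from code holding a HomeServer (the room id and target version are illustrative); this is essentially what the new REST servlet later in this diff does.

    from twisted.internet import defer

    @defer.inlineCallbacks
    def upgrade_example(hs, requester):
        handler = hs.get_room_creation_handler()
        new_room_id = yield handler.upgrade_room(
            requester, "!oldroom:example.com", "2",  # illustrative values
        )
        defer.returnValue(new_room_id)
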
diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py
index f369124258..50e1007d84 100644
--- a/synapse/push/emailpusher.py
+++ b/synapse/push/emailpusher.py
@@ -85,7 +85,10 @@ class EmailPusher(object):
             self.timed_call = None
 
     def on_new_notifications(self, min_stream_ordering, max_stream_ordering):
-        self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering)
+        if self.max_stream_ordering:
+            self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering)
+        else:
+            self.max_stream_ordering = max_stream_ordering
         self._start_processing()
 
     def on_new_receipts(self, min_stream_id, max_stream_id):
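
The guard matters because max_stream_ordering starts out as None, and on Python 3 an int/None comparison raises where Python 2 silently ordered None below everything. A self-contained illustration (the value 45 is arbitrary):

    max_stream_ordering = None  # state before any notification arrives

    # Python 2: max(45, None) == 45.  Python 3: TypeError - so check first.
    if max_stream_ordering:
        max_stream_ordering = max(45, max_stream_ordering)
    else:
        max_stream_ordering = 45

    assert max_stream_ordering == 45
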
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 16fb5e8471..ebcb93bfc7 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -26,7 +26,6 @@ import bleach
 import jinja2
 
 from twisted.internet import defer
-from twisted.mail.smtp import sendmail
 
 from synapse.api.constants import EventTypes
 from synapse.api.errors import StoreError
@@ -85,6 +84,7 @@ class Mailer(object):
         self.notif_template_html = notif_template_html
         self.notif_template_text = notif_template_text
 
+        self.sendmail = self.hs.get_sendmail()
         self.store = self.hs.get_datastore()
         self.macaroon_gen = self.hs.get_macaroon_generator()
         self.state_handler = self.hs.get_state_handler()
@@ -191,11 +191,11 @@ class Mailer(object):
         multipart_msg.attach(html_part)
 
         logger.info("Sending email push notification to %s" % email_address)
-        # logger.debug(html_text)
 
-        yield sendmail(
+        yield self.sendmail(
             self.hs.config.email_smtp_host,
-            raw_from, raw_to, multipart_msg.as_string(),
+            raw_from, raw_to, multipart_msg.as_string().encode('utf8'),
+            reactor=self.hs.get_reactor(),
             port=self.hs.config.email_smtp_port,
             requireAuthentication=self.hs.config.email_smtp_user is not None,
             username=self.hs.config.email_smtp_user,
@@ -333,7 +333,7 @@ class Mailer(object):
                           notif_events, user_id, reason):
         if len(notifs_by_room) == 1:
             # Only one room has new stuff
-            room_id = notifs_by_room.keys()[0]
+            room_id = list(notifs_by_room.keys())[0]
 
             # If the room has some kind of name, use it, but we don't
             # want the generated-from-names one here otherwise we'll
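
The keys() change above is another Python 3 fix: dict views are not subscriptable, so indexing must go through list(). A small illustration:

    notifs_by_room = {"!abc:example.com": ["some notification"]}

    # notifs_by_room.keys()[0] worked on Python 2 but raises TypeError on
    # Python 3; materialising the view first works on both
    room_id = list(notifs_by_room.keys())[0]
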
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index 943876456b..ca62ee7637 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -51,7 +51,6 @@ REQUIREMENTS = {
     "daemonize>=2.3.1": ["daemonize"],
     "bcrypt>=3.1.0": ["bcrypt>=3.1.0"],
     "pillow>=3.1.2": ["PIL"],
-    "pydenticon>=0.2": ["pydenticon"],
     "sortedcontainers>=1.4.4": ["sortedcontainers"],
     "psutil>=2.0.0": ["psutil>=2.0.0"],
     "pysaml2>=3.0.0": ["saml2"],
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index cbe9645817..586dddb40b 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -106,7 +106,7 @@ class ReplicationClientHandler(object):
 
         Can be overriden in subclasses to handle more.
         """
-        logger.info("Received rdata %s -> %s", stream_name, token)
+        logger.debug("Received rdata %s -> %s", stream_name, token)
         return self.store.process_replication_rows(stream_name, token, rows)
 
     def on_position(self, stream_name, token):
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index 5dc7b3fffc..0b3fe6cbf5 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -656,7 +656,7 @@ tcp_inbound_commands = LaterGauge(
     "",
     ["command", "name"],
     lambda: {
-        (k[0], p.name,): count
+        (k, p.name,): count
         for p in connected_connections
         for k, count in iteritems(p.inbound_commands_counter)
     },
@@ -667,7 +667,7 @@ tcp_outbound_commands = LaterGauge(
     "",
     ["command", "name"],
     lambda: {
-        (k[0], p.name,): count
+        (k, p.name,): count
         for p in connected_connections
         for k, count in iteritems(p.outbound_commands_counter)
     },
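
The label fix is easy to see in isolation: the counters are keyed by the command name (a string), so k[0] was the name's first character rather than the full name. An illustrative reproduction:

    inbound_commands_counter = {"RDATA": 3, "PING": 7}

    old_labels = sorted(k[0] for k in inbound_commands_counter)  # ['P', 'R'] - wrong
    new_labels = sorted(k for k in inbound_commands_counter)     # ['PING', 'RDATA']
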
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index 4856822a5d..5f35c2d1be 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -47,6 +47,7 @@ from synapse.rest.client.v2_alpha import (
     register,
     report_event,
     room_keys,
+    room_upgrade_rest_servlet,
     sendtodevice,
     sync,
     tags,
@@ -116,3 +117,4 @@ class ClientRestResource(JsonResource):
         sendtodevice.register_servlets(hs, client_resource)
         user_directory.register_servlets(hs, client_resource)
         groups.register_servlets(hs, client_resource)
+        room_upgrade_rest_servlet.register_servlets(hs, client_resource)
diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
new file mode 100644
index 0000000000..e6356101fd
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from twisted.internet import defer
+
+from synapse.api.constants import KNOWN_ROOM_VERSIONS
+from synapse.api.errors import Codes, SynapseError
+from synapse.http.servlet import (
+    RestServlet,
+    assert_params_in_dict,
+    parse_json_object_from_request,
+)
+
+from ._base import client_v2_patterns
+
+logger = logging.getLogger(__name__)
+
+
+class RoomUpgradeRestServlet(RestServlet):
+    """Handler for room uprade requests.
+
+    Handles requests of the form:
+
+        POST /_matrix/client/r0/rooms/$roomid/upgrade HTTP/1.1
+        Content-Type: application/json
+
+        {
+            "new_version": "2",
+        }
+
+    Creates a new room and shuts down the old one. Returns the ID of the new room.
+
+    Args:
+        hs (synapse.server.HomeServer):
+    """
+    PATTERNS = client_v2_patterns(
+        # /rooms/$roomid/upgrade
+        "/rooms/(?P<room_id>[^/]*)/upgrade$",
+        v2_alpha=False,
+    )
+
+    def __init__(self, hs):
+        super(RoomUpgradeRestServlet, self).__init__()
+        self._hs = hs
+        self._room_creation_handler = hs.get_room_creation_handler()
+        self._auth = hs.get_auth()
+
+    @defer.inlineCallbacks
+    def on_POST(self, request, room_id):
+        requester = yield self._auth.get_user_by_req(request)
+
+        content = parse_json_object_from_request(request)
+        assert_params_in_dict(content, ("new_version", ))
+        new_version = content["new_version"]
+
+        if new_version not in KNOWN_ROOM_VERSIONS:
+            raise SynapseError(
+                400,
+                "Your homeserver does not support this room version",
+                Codes.UNSUPPORTED_ROOM_VERSION,
+            )
+
+        new_room_id = yield self._room_creation_handler.upgrade_room(
+            requester, room_id, new_version
+        )
+
+        ret = {
+            "replacement_room": new_room_id,
+        }
+
+        defer.returnValue((200, ret))
+
+
+def register_servlets(hs, http_server):
+    RoomUpgradeRestServlet(hs).register(http_server)
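
A hedged client-side sketch of exercising the new endpoint with the requests library (the homeserver URL, room id, and access token are all illustrative):

    import requests

    resp = requests.post(
        "https://hs.example.com/_matrix/client/r0"
        "/rooms/%21oldroom%3Aexample.com/upgrade",
        params={"access_token": "MDAxILLUSTRATIVE"},
        json={"new_version": "2"},
    )
    print(resp.json()["replacement_room"])
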
diff --git a/synapse/rest/key/v1/__init__.py b/synapse/rest/key/v1/__init__.py
deleted file mode 100644
index fe0ac3f8e9..0000000000
--- a/synapse/rest/key/v1/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2015, 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/synapse/rest/key/v1/server_key_resource.py b/synapse/rest/key/v1/server_key_resource.py
deleted file mode 100644
index 38eb2ee23f..0000000000
--- a/synapse/rest/key/v1/server_key_resource.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import logging
-
-from canonicaljson import encode_canonical_json
-from signedjson.sign import sign_json
-from unpaddedbase64 import encode_base64
-
-from OpenSSL import crypto
-from twisted.web.resource import Resource
-
-from synapse.http.server import respond_with_json_bytes
-
-logger = logging.getLogger(__name__)
-
-
-class LocalKey(Resource):
-    """HTTP resource containing encoding the TLS X.509 certificate and NACL
-    signature verification keys for this server::
-
-        GET /key HTTP/1.1
-
-        HTTP/1.1 200 OK
-        Content-Type: application/json
-        {
-            "server_name": "this.server.example.com"
-            "verify_keys": {
-                "algorithm:version": # base64 encoded NACL verification key.
-            },
-            "tls_certificate": # base64 ASN.1 DER encoded X.509 tls cert.
-            "signatures": {
-                "this.server.example.com": {
-                   "algorithm:version": # NACL signature for this server.
-                }
-            }
-        }
-    """
-
-    def __init__(self, hs):
-        self.response_body = encode_canonical_json(
-            self.response_json_object(hs.config)
-        )
-        Resource.__init__(self)
-
-    @staticmethod
-    def response_json_object(server_config):
-        verify_keys = {}
-        for key in server_config.signing_key:
-            verify_key_bytes = key.verify_key.encode()
-            key_id = "%s:%s" % (key.alg, key.version)
-            verify_keys[key_id] = encode_base64(verify_key_bytes)
-
-        x509_certificate_bytes = crypto.dump_certificate(
-            crypto.FILETYPE_ASN1,
-            server_config.tls_certificate
-        )
-        json_object = {
-            u"server_name": server_config.server_name,
-            u"verify_keys": verify_keys,
-            u"tls_certificate": encode_base64(x509_certificate_bytes)
-        }
-        for key in server_config.signing_key:
-            json_object = sign_json(
-                json_object,
-                server_config.server_name,
-                key,
-            )
-
-        return json_object
-
-    def render_GET(self, request):
-        return respond_with_json_bytes(
-            request, 200, self.response_body,
-        )
-
-    def getChild(self, name, request):
-        if name == b'':
-            return self
diff --git a/synapse/rest/media/v1/identicon_resource.py b/synapse/rest/media/v1/identicon_resource.py
deleted file mode 100644
index bdbd8d50dd..0000000000
--- a/synapse/rest/media/v1/identicon_resource.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2015, 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from pydenticon import Generator
-
-from twisted.web.resource import Resource
-
-from synapse.http.servlet import parse_integer
-
-FOREGROUND = [
-    "rgb(45,79,255)",
-    "rgb(254,180,44)",
-    "rgb(226,121,234)",
-    "rgb(30,179,253)",
-    "rgb(232,77,65)",
-    "rgb(49,203,115)",
-    "rgb(141,69,170)"
-]
-
-BACKGROUND = "rgb(224,224,224)"
-SIZE = 5
-
-
-class IdenticonResource(Resource):
-    isLeaf = True
-
-    def __init__(self):
-        Resource.__init__(self)
-        self.generator = Generator(
-            SIZE, SIZE, foreground=FOREGROUND, background=BACKGROUND,
-        )
-
-    def generate_identicon(self, name, width, height):
-        v_padding = width % SIZE
-        h_padding = height % SIZE
-        top_padding = v_padding // 2
-        left_padding = h_padding // 2
-        bottom_padding = v_padding - top_padding
-        right_padding = h_padding - left_padding
-        width -= v_padding
-        height -= h_padding
-        padding = (top_padding, bottom_padding, left_padding, right_padding)
-        identicon = self.generator.generate(
-            name, width, height, padding=padding
-        )
-        return identicon
-
-    def render_GET(self, request):
-        name = "/".join(request.postpath)
-        width = parse_integer(request, "width", default=96)
-        height = parse_integer(request, "height", default=96)
-        identicon_bytes = self.generate_identicon(name, width, height)
-        request.setHeader(b"Content-Type", b"image/png")
-        request.setHeader(
-            b"Cache-Control", b"public,max-age=86400,s-maxage=86400"
-        )
-        return identicon_bytes
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index 08b1867fab..d6c5f07af0 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -45,7 +45,6 @@ from ._base import FileInfo, respond_404, respond_with_responder
 from .config_resource import MediaConfigResource
 from .download_resource import DownloadResource
 from .filepath import MediaFilePaths
-from .identicon_resource import IdenticonResource
 from .media_storage import MediaStorage
 from .preview_url_resource import PreviewUrlResource
 from .storage_provider import StorageProviderWrapper
@@ -769,7 +768,6 @@ class MediaRepositoryResource(Resource):
         self.putChild(b"thumbnail", ThumbnailResource(
             hs, media_repo, media_repo.media_storage,
         ))
-        self.putChild(b"identicon", IdenticonResource())
         if hs.config.url_preview_enabled:
             self.putChild(b"preview_url", PreviewUrlResource(
                 hs, media_repo, media_repo.media_storage,
diff --git a/synapse/server.py b/synapse/server.py
index cf6b872cbd..9985687b95 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -23,6 +23,7 @@ import abc
 import logging
 
 from twisted.enterprise import adbapi
+from twisted.mail.smtp import sendmail
 from twisted.web.client import BrowserLikePolicyForHTTPS
 
 from synapse.api.auth import Auth
@@ -174,6 +175,7 @@ class HomeServer(object):
         'message_handler',
         'pagination_handler',
         'room_context_handler',
+        'sendmail',
     ]
 
     # This is overridden in derived application classes
@@ -269,6 +271,9 @@ class HomeServer(object):
     def build_room_creation_handler(self):
         return RoomCreationHandler(self)
 
+    def build_sendmail(self):
+        return sendmail
+
     def build_state_handler(self):
         return StateHandler(self)
 
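
Registering `sendmail` as a buildable dependency means the mail transport can be swapped out wholesale, which the email-pusher tests added below rely on. A minimal sketch of such an override from inside a test's `make_homeserver` (mirroring tests/push/test_email.py in this diff; a hedged illustration, not the only way to do it):

    from twisted.internet.defer import Deferred

    # Record send attempts rather than talking to a real SMTP server. Each
    # entry keeps the Deferred we handed back, so the test can later fire
    # .callback(True) to simulate a successful send, or .errback(...) to
    # simulate a failure.
    self.email_attempts = []

    def fake_sendmail(*args, **kwargs):
        d = Deferred()
        self.email_attempts.append((d, args, kwargs))
        return d

    # The sendmail= keyword overrides build_sendmail, so anything calling
    # hs.get_sendmail() gets the fake instead of twisted.mail.smtp.sendmail.
    hs = self.setup_test_homeserver(
        config=self.default_config(), sendmail=fake_sendmail,
    )
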
diff --git a/synapse/server.pyi b/synapse/server.pyi
index ce28486233..06cd083a74 100644
--- a/synapse/server.pyi
+++ b/synapse/server.pyi
@@ -7,6 +7,9 @@ import synapse.handlers.auth
 import synapse.handlers.deactivate_account
 import synapse.handlers.device
 import synapse.handlers.e2e_keys
+import synapse.handlers.room
+import synapse.handlers.room_member
+import synapse.handlers.message
 import synapse.handlers.set_password
 import synapse.rest.media.v1.media_repository
 import synapse.server_notices.server_notices_manager
@@ -50,6 +53,9 @@ class HomeServer(object):
     def get_room_creation_handler(self) -> synapse.handlers.room.RoomCreationHandler:
         pass
 
+    def get_room_member_handler(self) -> synapse.handlers.room_member.RoomMemberHandler:
+        pass
+
     def get_event_creation_handler(self) -> synapse.handlers.message.EventCreationHandler:
         pass
 
diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py
index d10ff9e4b9..62497ab63f 100644
--- a/synapse/storage/devices.py
+++ b/synapse/storage/devices.py
@@ -589,10 +589,14 @@ class DeviceStore(SQLBaseStore):
         combined list of changes to devices, and which destinations need to be
         poked. `destination` may be None if no destinations need to be poked.
         """
+        # We GROUP BY here because, once the device IDs are thrown away,
+        # there can be a large number of duplicate (user_id, destination) rows.
         sql = """
-            SELECT stream_id, user_id, destination FROM device_lists_stream
+            SELECT MAX(stream_id) AS stream_id, user_id, destination
+            FROM device_lists_stream
             LEFT JOIN device_lists_outbound_pokes USING (stream_id, user_id, device_id)
             WHERE ? < stream_id AND stream_id <= ?
+            GROUP BY user_id, destination
         """
         return self._execute(
             "get_all_device_list_changes_for_remotes", None,
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 8881b009df..919e855f3b 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -38,6 +38,7 @@ from synapse.state import StateResolutionStore
 from synapse.storage.background_updates import BackgroundUpdateStore
 from synapse.storage.event_federation import EventFederationStore
 from synapse.storage.events_worker import EventsWorkerStore
+from synapse.storage.state import StateGroupWorkerStore
 from synapse.types import RoomStreamToken, get_domain_from_id
 from synapse.util import batch_iter
 from synapse.util.async_helpers import ObservableDeferred
@@ -205,7 +206,8 @@ def _retry_on_integrity_error(func):
 
 # inherits from EventFederationStore so that we can call _update_backward_extremities
 # and _handle_mult_prev_events (though arguably those could both be moved in here)
-class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore):
+class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore,
+                  BackgroundUpdateStore):
     EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
     EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"
 
@@ -2034,55 +2036,37 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
 
         logger.info("[purge] finding redundant state groups")
 
-        # Get all state groups that are only referenced by events that are
-        # to be deleted.
-        # This works by first getting state groups that we may want to delete,
-        # joining against event_to_state_groups to get events that use that
-        # state group, then left joining against events_to_purge again. Any
-        # state group where the left join produce *no nulls* are referenced
-        # only by events that are going to be purged.
+        # Get all state groups that are referenced by events that are to be
+        # deleted. We then check whether each is referenced by any other event
+        # or state group; those that are not can be deleted.
         txn.execute("""
-            SELECT state_group FROM
-            (
-                SELECT DISTINCT state_group FROM events_to_purge
-                INNER JOIN event_to_state_groups USING (event_id)
-            ) AS sp
-            INNER JOIN event_to_state_groups USING (state_group)
-            LEFT JOIN events_to_purge AS ep USING (event_id)
-            GROUP BY state_group
-            HAVING SUM(CASE WHEN ep.event_id IS NULL THEN 1 ELSE 0 END) = 0
+            SELECT DISTINCT state_group FROM events_to_purge
+            INNER JOIN event_to_state_groups USING (event_id)
         """)
 
-        state_rows = txn.fetchall()
-        logger.info("[purge] found %i redundant state groups", len(state_rows))
-
-        # make a set of the redundant state groups, so that we can look them up
-        # efficiently
-        state_groups_to_delete = set([sg for sg, in state_rows])
-
-        # Now we get all the state groups that rely on these state groups
-        logger.info("[purge] finding state groups which depend on redundant"
-                    " state groups")
-        remaining_state_groups = []
-        for i in range(0, len(state_rows), 100):
-            chunk = [sg for sg, in state_rows[i:i + 100]]
-            # look for state groups whose prev_state_group is one we are about
-            # to delete
-            rows = self._simple_select_many_txn(
-                txn,
-                table="state_group_edges",
-                column="prev_state_group",
-                iterable=chunk,
-                retcols=["state_group"],
-                keyvalues={},
-            )
-            remaining_state_groups.extend(
-                row["state_group"] for row in rows
+        referenced_state_groups = set(sg for sg, in txn)
+        logger.info(
+            "[purge] found %i referenced state groups",
+            len(referenced_state_groups),
+        )
 
-                # exclude state groups we are about to delete: no point in
-                # updating them
-                if row["state_group"] not in state_groups_to_delete
+        logger.info("[purge] finding state groups that can be deleted")
+
+        state_groups_to_delete, remaining_state_groups = (
+            self._find_unreferenced_groups_during_purge(
+                txn, referenced_state_groups,
             )
+        )
+
+        logger.info(
+            "[purge] found %i state groups to delete",
+            len(state_groups_to_delete),
+        )
+
+        logger.info(
+            "[purge] de-delta-ing %i remaining state groups",
+            len(remaining_state_groups),
+        )
 
         # Now we turn the state groups that reference to-be-deleted state
         # groups to non delta versions.
@@ -2127,11 +2111,11 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
         logger.info("[purge] removing redundant state groups")
         txn.executemany(
             "DELETE FROM state_groups_state WHERE state_group = ?",
-            state_rows
+            ((sg,) for sg in state_groups_to_delete),
         )
         txn.executemany(
             "DELETE FROM state_groups WHERE id = ?",
-            state_rows
+            ((sg,) for sg in state_groups_to_delete),
         )
 
         logger.info("[purge] removing events from event_to_state_groups")
@@ -2227,6 +2211,85 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
 
         logger.info("[purge] done")
 
+    def _find_unreferenced_groups_during_purge(self, txn, state_groups):
+        """Used when purging history to figure out which state groups can be
+        deleted and which need to be de-delta'ed (because one of their prev
+        groups is scheduled for deletion).
+
+        Args:
+            txn: The database transaction.
+            state_groups (set[int]): Set of state groups referenced by events
+                that are going to be deleted.
+
+        Returns:
+            tuple[set[int], set[int]]: The set of state groups that can be
+            deleted and the set of state groups that need to be de-delta'ed
+        """
+        # Graph of state group -> previous group
+        graph = {}
+
+        # Set of state groups found to be referenced by non-purged events
+        referenced_groups = set()
+
+        # Set of state groups we've already seen
+        state_groups_seen = set(state_groups)
+
+        # Set of state groups to handle next.
+        next_to_search = set(state_groups)
+        while next_to_search:
+            # We bound the number of groups we look up at once, to stop the
+            # SQL query from getting too big
+            if len(next_to_search) < 100:
+                current_search = next_to_search
+                next_to_search = set()
+            else:
+                current_search = set(itertools.islice(next_to_search, 100))
+                next_to_search -= current_search
+
+            # Check which of these groups are referenced by non-purged events
+            sql = """
+                SELECT DISTINCT state_group FROM event_to_state_groups
+                LEFT JOIN events_to_purge AS ep USING (event_id)
+                WHERE state_group IN (%s) AND ep.event_id IS NULL
+            """ % (",".join("?" for _ in current_search),)
+            txn.execute(sql, list(current_search))
+
+            referenced = set(sg for sg, in txn)
+            referenced_groups |= referenced
+
+            # We don't continue iterating up the state group graphs for state
+            # groups that are referenced.
+            current_search -= referenced
+
+            rows = self._simple_select_many_txn(
+                txn,
+                table="state_group_edges",
+                column="prev_state_group",
+                iterable=current_search,
+                keyvalues={},
+                retcols=("prev_state_group", "state_group",),
+            )
+
+            # (These are the groups that delta against the current batch.)
+            prevs = set(row["state_group"] for row in rows)
+            prevs -= state_groups_seen  # no point re-handling groups seen before
+            next_to_search |= prevs
+            state_groups_seen |= prevs
+
+            for row in rows:
+                # Note: Each state group can have at most one prev group
+                graph[row["state_group"]] = row["prev_state_group"]
+
+        to_delete = state_groups_seen - referenced_groups
+
+        to_dedelta = set()
+        for sg in referenced_groups:
+            prev_sg = graph.get(sg)
+            if prev_sg and prev_sg in to_delete:
+                to_dedelta.add(sg)
+
+        return to_delete, to_dedelta
+
     @defer.inlineCallbacks
     def is_event_after(self, event_id1, event_id2):
         """Returns True if event_id1 is after event_id2 in the stream
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index b364719312..bd740e1e45 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -25,7 +25,7 @@ logger = logging.getLogger(__name__)
 
 # Remember to update this number every time a change is made to database
 # schema files, so the users will be informed on server restarts.
-SCHEMA_VERSION = 51
+SCHEMA_VERSION = 52
 
 dir_path = os.path.abspath(os.path.dirname(__file__))
 
diff --git a/synapse/storage/room.py b/synapse/storage/room.py
index 61013b8919..41c65e112a 100644
--- a/synapse/storage/room.py
+++ b/synapse/storage/room.py
@@ -47,7 +47,7 @@ class RoomWorkerStore(SQLBaseStore):
         Args:
             room_id (str): The ID of the room to retrieve.
         Returns:
-            A namedtuple containing the room information, or an empty list.
+            A dict containing the room information, or None if the room is unknown.
         """
         return self._simple_select_one(
             table="rooms",
diff --git a/synapse/storage/schema/delta/52/add_event_to_state_group_index.sql b/synapse/storage/schema/delta/52/add_event_to_state_group_index.sql
new file mode 100644
index 0000000000..91e03d13e1
--- /dev/null
+++ b/synapse/storage/schema/delta/52/add_event_to_state_group_index.sql
@@ -0,0 +1,19 @@
+/* Copyright 2018 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- This is needed to efficiently check for unreferenced state groups during
+-- purge. Adds an index on event_to_state_groups(state_group).
+INSERT INTO background_updates (update_name, progress_json)
+    VALUES ('event_to_state_groups_sg_index', '{}');
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index ef65929bb2..d737bd6778 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -1257,6 +1257,7 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore):
     STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication"
     STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index"
     CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx"
+    EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index"
 
     def __init__(self, db_conn, hs):
         super(StateStore, self).__init__(db_conn, hs)
@@ -1275,6 +1276,12 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore):
             columns=["state_key"],
             where_clause="type='m.room.member'",
         )
+        self.register_background_index_update(
+            self.EVENT_STATE_GROUP_INDEX_UPDATE_NAME,
+            index_name="event_to_state_groups_sg_index",
+            table="event_to_state_groups",
+            columns=["state_group"],
+        )
 
     def _store_event_state_mappings_txn(self, txn, events_and_contexts):
         state_groups = {}
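
The schema delta above only queues the work: the background_updates row tells the background updater to build the index while the server runs, instead of blocking the schema upgrade. Roughly the following DDL ends up being executed (a sketch run on SQLite purely for illustration; on PostgreSQL such indexes can be built without locking the table):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE event_to_state_groups (event_id TEXT, state_group BIGINT)"
    )

    # The index the 'event_to_state_groups_sg_index' background update builds;
    # it makes the "is this state group still referenced?" lookups in the
    # purge path cheap.
    conn.execute(
        "CREATE INDEX event_to_state_groups_sg_index"
        " ON event_to_state_groups (state_group)"
    )
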
diff --git a/tests/push/__init__.py b/tests/push/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/push/__init__.py
diff --git a/tests/push/test_email.py b/tests/push/test_email.py
new file mode 100644
index 0000000000..50ee6910d1
--- /dev/null
+++ b/tests/push/test_email.py
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import pkg_resources
+
+from twisted.internet.defer import Deferred
+
+from synapse.rest.client.v1 import admin, login, room
+
+from tests.unittest import HomeserverTestCase
+
+try:
+    from synapse.push.mailer import load_jinja2_templates
+except Exception:
+    load_jinja2_templates = None
+
+
+class EmailPusherTests(HomeserverTestCase):
+
+    skip = "No Jinja installed" if not load_jinja2_templates else None
+    servlets = [
+        admin.register_servlets,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+    user_id = True
+    hijack_auth = False
+
+    def make_homeserver(self, reactor, clock):
+
+        # List[Tuple[Deferred, args, kwargs]]
+        self.email_attempts = []
+
+        def sendmail(*args, **kwargs):
+            d = Deferred()
+            self.email_attempts.append((d, args, kwargs))
+            return d
+
+        config = self.default_config()
+        config.email_enable_notifs = True
+        config.start_pushers = True
+
+        config.email_template_dir = os.path.abspath(
+            pkg_resources.resource_filename('synapse', 'res/templates')
+        )
+        config.email_notif_template_html = "notif_mail.html"
+        config.email_notif_template_text = "notif_mail.txt"
+        config.email_smtp_host = "127.0.0.1"
+        config.email_smtp_port = 20
+        config.require_transport_security = False
+        config.email_smtp_user = None
+        config.email_app_name = "Matrix"
+        config.email_notif_from = "test@example.com"
+
+        hs = self.setup_test_homeserver(config=config, sendmail=sendmail)
+
+        return hs
+
+    def test_sends_email(self):
+
+        # Register the user who gets notified
+        user_id = self.register_user("user", "pass")
+        access_token = self.login("user", "pass")
+
+        # Register the user who sends the message
+        other_user_id = self.register_user("otheruser", "pass")
+        other_access_token = self.login("otheruser", "pass")
+
+        # Register the pusher
+        user_tuple = self.get_success(
+            self.hs.get_datastore().get_user_by_access_token(access_token)
+        )
+        token_id = user_tuple["token_id"]
+
+        self.get_success(
+            self.hs.get_pusherpool().add_pusher(
+                user_id=user_id,
+                access_token=token_id,
+                kind="email",
+                app_id="m.email",
+                app_display_name="Email Notifications",
+                device_display_name="a@example.com",
+                pushkey="a@example.com",
+                lang=None,
+                data={},
+            )
+        )
+
+        # Create a room
+        room = self.helper.create_room_as(user_id, tok=access_token)
+
+        # Invite the other person
+        self.helper.invite(room=room, src=user_id, tok=access_token, targ=other_user_id)
+
+        # The other user joins
+        self.helper.join(room=room, user=other_user_id, tok=other_access_token)
+
+        # The other user sends some messages
+        self.helper.send(room, body="Hi!", tok=other_access_token)
+        self.helper.send(room, body="There!", tok=other_access_token)
+
+        # Get the pusher's last stream ordering before the email is sent
+        pushers = self.get_success(
+            self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+        )
+        self.assertEqual(len(pushers), 1)
+        last_stream_ordering = pushers[0]["last_stream_ordering"]
+
+        # Advance time a bit, so that the pusher notices something has happened
+        self.pump(100)
+
+        # It hasn't succeeded yet, so the stream ordering shouldn't have moved
+        pushers = self.get_success(
+            self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+        )
+        self.assertEqual(len(pushers), 1)
+        self.assertEqual(last_stream_ordering, pushers[0]["last_stream_ordering"])
+
+        # Exactly one attempt should have been made to send an email
+        self.assertEqual(len(self.email_attempts), 1)
+
+        # Make the email succeed
+        self.email_attempts[0][0].callback(True)
+        self.pump()
+
+        # Still only the one attempt; the successful send was not retried
+        self.assertEqual(len(self.email_attempts), 1)
+
+        # The stream ordering has increased
+        pushers = self.get_success(
+            self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+        )
+        self.assertEqual(len(pushers), 1)
+        self.assertGreater(pushers[0]["last_stream_ordering"], last_stream_ordering)
diff --git a/tests/server.py b/tests/server.py
index 7bee58dff1..819c854448 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -125,7 +125,9 @@ def make_request(method, path, content=b"", access_token=None, request=SynapseRe
     req.content = BytesIO(content)
 
     if access_token:
-        req.requestHeaders.addRawHeader(b"Authorization", b"Bearer " + access_token)
+        req.requestHeaders.addRawHeader(
+            b"Authorization", b"Bearer " + access_token.encode('ascii')
+        )
 
     if content:
         req.requestHeaders.addRawHeader(b"Content-Type", b"application/json")
diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py
index 4701eedd45..b1551df7ca 100644
--- a/tests/server_notices/test_resource_limits_server_notices.py
+++ b/tests/server_notices/test_resource_limits_server_notices.py
@@ -4,7 +4,6 @@ from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, ServerNoticeMsgType
 from synapse.api.errors import ResourceLimitError
-from synapse.handlers.auth import AuthHandler
 from synapse.server_notices.resource_limits_server_notices import (
     ResourceLimitsServerNotices,
 )
@@ -13,17 +12,10 @@ from tests import unittest
 from tests.utils import setup_test_homeserver
 
 
-class AuthHandlers(object):
-    def __init__(self, hs):
-        self.auth_handler = AuthHandler(hs)
-
-
 class TestResourceLimitsServerNotices(unittest.TestCase):
     @defer.inlineCallbacks
     def setUp(self):
-        self.hs = yield setup_test_homeserver(self.addCleanup, handlers=None)
-        self.hs.handlers = AuthHandlers(self.hs)
-        self.auth_handler = self.hs.handlers.auth_handler
+        self.hs = yield setup_test_homeserver(self.addCleanup)
         self.server_notices_sender = self.hs.get_server_notices_sender()
 
         # relying on [1] is far from ideal, but the only case where
diff --git a/tests/test_mau.py b/tests/test_mau.py
index bdbacb8448..5d387851c5 100644
--- a/tests/test_mau.py
+++ b/tests/test_mau.py
@@ -207,7 +207,7 @@ class TestMauLimit(unittest.TestCase):
 
     def do_sync_for_user(self, token):
         request, channel = make_request(
-            "GET", "/sync", access_token=token.encode('ascii')
+            "GET", "/sync", access_token=token
         )
         render(request, self.resource, self.reactor)
 
diff --git a/tests/unittest.py b/tests/unittest.py
index a59291cc60..4d40bdb6a5 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -146,6 +146,13 @@ def DEBUG(target):
     return target
 
 
+def INFO(target):
+    """A decorator to set the .loglevel attribute to logging.INFO.
+    Can be applied to either a TestCase or an individual test method."""
+    target.loglevel = logging.INFO
+    return target
+
+
 class HomeserverTestCase(TestCase):
     """
     A base TestCase that reduces boilerplate for HomeServer-using test cases.
@@ -373,5 +380,5 @@ class HomeserverTestCase(TestCase):
         self.render(request)
         self.assertEqual(channel.code, 200)
 
-        access_token = channel.json_body["access_token"].encode('ascii')
+        access_token = channel.json_body["access_token"]
         return access_token
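
Like the existing DEBUG decorator, the new INFO decorator can be applied at either level; a hypothetical example (class and test names are illustrative):

    from tests.unittest import INFO, HomeserverTestCase

    @INFO  # the whole TestCase logs at INFO
    class QuietTests(HomeserverTestCase):  # hypothetical test case
        def test_first(self):
            pass

    class MixedTests(HomeserverTestCase):  # hypothetical test case
        @INFO  # only this test method logs at INFO
        def test_second(self):
            pass
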
diff --git a/tox.ini b/tox.ini
index 9de5a5704a..920211bf50 100644
--- a/tox.ini
+++ b/tox.ini
@@ -11,6 +11,20 @@ deps =
     # needed by some of the tests
     lxml
 
+    # cryptography 2.2 requires setuptools >= 18.5.
+    #
+    # Older versions of virtualenv seem to give us a virtualenv with the same
+    # version of setuptools as is installed on the system python (and tox runs
+    # virtualenv under python3, so we get the version of setuptools that is
+    # installed on that).
+    #
+    # Anyway, make sure that we have a recent enough setuptools.
+    setuptools>=18.5
+
+    # We also need a semi-recent version of pip, because old ones fail to
+    # install the "enum34" dependency of cryptography.
+    pip>=10
+
 setenv =
     PYTHONDONTWRITEBYTECODE = no_byte_code