-rw-r--r--  .codecov.yml | 15
-rw-r--r--  .coveragerc | 6
-rw-r--r--  .gitignore | 6
-rw-r--r--  MANIFEST.in | 1
-rw-r--r--  README.rst | 2
-rw-r--r--  changelog.d/4229.feature | 1
-rw-r--r--  changelog.d/4306.misc | 1
-rw-r--r--  changelog.d/4384.feature | 1
-rw-r--r--  changelog.d/4390.misc | 1
-rw-r--r--  changelog.d/4400.misc | 1
-rw-r--r--  changelog.d/4402.misc | 1
-rw-r--r--  changelog.d/4404.bugfix | 1
-rw-r--r--  changelog.d/4405.bugfix | 1
-rw-r--r--  changelog.d/4407.bugfix | 1
-rw-r--r--  changelog.d/4409.misc | 1
-rw-r--r--  changelog.d/4411.bugfix | 1
-rw-r--r--  changelog.d/4423.feature | 1
-rw-r--r--  changelog.d/4426.misc | 1
-rw-r--r--  changelog.d/4427.misc | 1
-rw-r--r--  changelog.d/4428.misc | 1
-rw-r--r--  changelog.d/4432.misc | 1
-rw-r--r--  changelog.d/4433.misc | 1
-rw-r--r--  changelog.d/4434.misc | 1
-rw-r--r--  changelog.d/4435.bugfix | 1
-rw-r--r--  changelog.d/4437.misc | 1
-rw-r--r--  changelog.d/4444.misc | 1
-rw-r--r--  changelog.d/4445.feature | 1
-rw-r--r--  changelog.d/4447.misc | 1
-rw-r--r--  changelog.d/4448.misc | 1
-rw-r--r--  changelog.d/4452.bugfix | 1
-rw-r--r--  changelog.d/4458.misc | 1
-rw-r--r--  changelog.d/4459.misc | 1
-rw-r--r--  changelog.d/4460.bugfix | 1
-rw-r--r--  changelog.d/4461.bugfix | 1
-rw-r--r--  changelog.d/4464.misc | 1
-rwxr-xr-x  debian/build_virtualenv | 49
-rw-r--r--  debian/control | 2
-rw-r--r--  debian/homeserver.yaml | 617
-rw-r--r--  debian/install | 1
-rw-r--r--  demo/demo.tls.dh | 9
-rw-r--r--  docker/conf/homeserver.yaml | 1
-rwxr-xr-x  scripts-dev/build_debian_packages | 2
-rw-r--r--  synapse/api/auth.py | 4
-rw-r--r--  synapse/api/constants.py | 14
-rw-r--r--  synapse/api/urls.py | 4
-rwxr-xr-x  synapse/app/homeserver.py | 56
-rw-r--r--  synapse/config/_base.py | 4
-rw-r--r--  synapse/config/registration.py | 9
-rw-r--r--  synapse/config/server.py | 15
-rw-r--r--  synapse/config/tls.py | 149
-rw-r--r--  synapse/crypto/context_factory.py | 21
-rw-r--r--  synapse/crypto/keyclient.py | 149
-rw-r--r--  synapse/crypto/keyring.py | 30
-rw-r--r--  synapse/events/__init__.py | 28
-rw-r--r--  synapse/federation/federation_base.py | 11
-rw-r--r--  synapse/federation/federation_client.py | 80
-rw-r--r--  synapse/federation/federation_server.py | 16
-rw-r--r--  synapse/federation/transport/client.py | 132
-rw-r--r--  synapse/federation/transport/server.py | 46
-rw-r--r--  synapse/handlers/acme.py | 147
-rw-r--r--  synapse/handlers/federation.py | 51
-rw-r--r--  synapse/handlers/room.py | 1
-rw-r--r--  synapse/handlers/room_list.py | 14
-rw-r--r--  synapse/handlers/user_directory.py | 6
-rw-r--r--  synapse/http/client.py | 5
-rw-r--r--  synapse/http/endpoint.py | 280
-rw-r--r--  synapse/http/federation/__init__.py | 14
-rw-r--r--  synapse/http/federation/matrix_federation_agent.py | 125
-rw-r--r--  synapse/http/federation/srv_resolver.py | 169
-rw-r--r--  synapse/http/matrixfederationclient.py | 63
-rw-r--r--  synapse/python_dependencies.py | 15
-rw-r--r--  synapse/rest/client/v2_alpha/register.py | 23
-rw-r--r--  synapse/server.py | 5
-rw-r--r--  synapse/storage/_base.py | 171
-rw-r--r--  synapse/storage/client_ips.py | 70
-rw-r--r--  synapse/storage/engines/__init__.py | 2
-rw-r--r--  synapse/storage/engines/postgres.py | 14
-rw-r--r--  synapse/storage/engines/sqlite.py (renamed from synapse/storage/engines/sqlite3.py) | 9
-rw-r--r--  synapse/storage/events.py | 14
-rw-r--r--  synapse/storage/events_worker.py | 19
-rw-r--r--  synapse/storage/pusher.py | 9
-rw-r--r--  synapse/storage/roommember.py | 8
-rw-r--r--  synapse/storage/schema/delta/53/event_format_version.sql | 16
-rw-r--r--  synapse/storage/user_directory.py | 55
-rw-r--r--  synapse/util/async_helpers.py | 4
-rw-r--r--  tests/config/test_generate.py | 1
-rw-r--r--  tests/http/federation/__init__.py | 14
-rw-r--r--  tests/http/federation/test_matrix_federation_agent.py | 315
-rw-r--r--  tests/http/federation/test_srv_resolver.py | 207
-rw-r--r--  tests/http/test_fedclient.py | 150
-rw-r--r--  tests/server.py | 15
-rw-r--r--  tests/storage/test_base.py | 1
-rw-r--r--  tests/test_dns.py | 129
-rw-r--r--  tests/test_server.py | 12
-rw-r--r--  tests/unittest.py | 12
-rw-r--r--  tests/util/test_async_utils.py | 104
-rw-r--r--  tests/utils.py | 4
-rw-r--r--  tox.ini | 1
98 files changed, 2211 insertions(+), 1572 deletions(-)
diff --git a/.codecov.yml b/.codecov.yml
new file mode 100644
index 0000000000..a05698a39c
--- /dev/null
+++ b/.codecov.yml
@@ -0,0 +1,15 @@
+comment:
+  layout: "diff"
+
+coverage:
+  status:
+    project:
+      default:
+        target: 0  # Target % coverage, can be auto. Turned off for now
+        threshold: null
+        base: auto
+    patch:
+      default:
+        target: 0
+        threshold: null
+        base: auto
diff --git a/.coveragerc b/.coveragerc
index 9873a30738..e9460a340a 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,11 +1,7 @@
 [run]
 branch = True
 parallel = True
-source = synapse
-
-[paths]
-source=
-   coverage
+include = synapse/*
 
 [report]
 precision = 2
diff --git a/.gitignore b/.gitignore
index d739595c3a..1033124f1d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,9 +25,9 @@ homeserver*.pid
 *.tls.dh
 *.tls.key
 
-.coverage
-.coverage.*
-!.coverage.rc
+.coverage*
+coverage.*
+!.coveragerc
 htmlcov
 
 demo/*/*.db
diff --git a/MANIFEST.in b/MANIFEST.in
index 29303cc8b5..7e4c031b79 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -37,6 +37,7 @@ prune docker
 prune .circleci
 prune .coveragerc
 prune debian
+prune .codecov.yml
 
 exclude jenkins*
 recursive-exclude jenkins *.sh
diff --git a/README.rst b/README.rst
index 8bff55e78e..05a3bb3751 100644
--- a/README.rst
+++ b/README.rst
@@ -220,7 +220,7 @@ is configured to use TLS with a self-signed certificate. If you would like
 to do an initial test with a client without having to set up a reverse proxy,
 you can temporarily use another certificate. (Note that a self-signed
 certificate is fine for `Federation`_). You can do so by changing
-``tls_certificate_path``, ``tls_private_key_path`` and ``tls_dh_params_path``
+``tls_certificate_path`` and ``tls_private_key_path``
 in ``homeserver.yaml``; alternatively, you can use a reverse-proxy, but be sure
 to read `Using a reverse proxy with Synapse`_ when doing so.
 
diff --git a/changelog.d/4229.feature b/changelog.d/4229.feature
new file mode 100644
index 0000000000..0d1996c7e8
--- /dev/null
+++ b/changelog.d/4229.feature
@@ -0,0 +1 @@
+Synapse's cipher string has been updated to require ECDH key exchange. Configuring and generating dh_params is no longer required, and they will be ignored.
diff --git a/changelog.d/4306.misc b/changelog.d/4306.misc
new file mode 100644
index 0000000000..58130b6190
--- /dev/null
+++ b/changelog.d/4306.misc
@@ -0,0 +1 @@
+Synapse will now take advantage of native UPSERT functionality in PostgreSQL 9.5+ and SQLite 3.24+.
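
[Aside: "native UPSERT" here means the single-statement INSERT ... ON CONFLICT syntax, which replaces emulating an upsert with separate select/insert/update statements. A minimal sketch of the statement shape on SQLite 3.24+; the table and columns below are hypothetical, not Synapse's schema, and the linked SQLite library must itself be 3.24 or newer:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE kv (key TEXT PRIMARY KEY, value TEXT)")

# Insert, or atomically update the existing row when the key collides --
# one statement, no read-modify-write race.
conn.execute(
    "INSERT INTO kv (key, value) VALUES (?, ?) "
    "ON CONFLICT (key) DO UPDATE SET value = excluded.value",
    ("k1", "v2"),
)
conn.commit()
]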
diff --git a/changelog.d/4384.feature b/changelog.d/4384.feature
new file mode 100644
index 0000000000..daedcd58c4
--- /dev/null
+++ b/changelog.d/4384.feature
@@ -0,0 +1 @@
+Synapse can now automatically provision TLS certificates via ACME (the protocol used by CAs like Let's Encrypt).
diff --git a/changelog.d/4390.misc b/changelog.d/4390.misc
new file mode 100644
index 0000000000..c05a9609cf
--- /dev/null
+++ b/changelog.d/4390.misc
@@ -0,0 +1 @@
+Add groundwork for implementing future federation API versions
diff --git a/changelog.d/4400.misc b/changelog.d/4400.misc
new file mode 100644
index 0000000000..3d299dfe95
--- /dev/null
+++ b/changelog.d/4400.misc
@@ -0,0 +1 @@
+Tweak codecov settings to make them less loud.
diff --git a/changelog.d/4402.misc b/changelog.d/4402.misc
new file mode 100644
index 0000000000..4a0063ed34
--- /dev/null
+++ b/changelog.d/4402.misc
@@ -0,0 +1 @@
+Implement server support for MSC1794 - Federation v2 Invite API
diff --git a/changelog.d/4404.bugfix b/changelog.d/4404.bugfix
new file mode 100644
index 0000000000..5d40a3a60b
--- /dev/null
+++ b/changelog.d/4404.bugfix
@@ -0,0 +1 @@
+Fix potential bug where creating or joining a room could fail
diff --git a/changelog.d/4405.bugfix b/changelog.d/4405.bugfix
new file mode 100644
index 0000000000..974d799b88
--- /dev/null
+++ b/changelog.d/4405.bugfix
@@ -0,0 +1 @@
+Fix bug when rejecting remote invites
diff --git a/changelog.d/4407.bugfix b/changelog.d/4407.bugfix
new file mode 100644
index 0000000000..54c5e76d1f
--- /dev/null
+++ b/changelog.d/4407.bugfix
@@ -0,0 +1 @@
+Fix incorrect logcontexts after a Deferred was cancelled
diff --git a/changelog.d/4409.misc b/changelog.d/4409.misc
new file mode 100644
index 0000000000..9cf2adfbb1
--- /dev/null
+++ b/changelog.d/4409.misc
@@ -0,0 +1 @@
+Remove redundant federation connection wrapping code
diff --git a/changelog.d/4411.bugfix b/changelog.d/4411.bugfix
new file mode 100644
index 0000000000..219e98a924
--- /dev/null
+++ b/changelog.d/4411.bugfix
@@ -0,0 +1 @@
+Ensure encrypted room state is persisted across room upgrades.
\ No newline at end of file
diff --git a/changelog.d/4423.feature b/changelog.d/4423.feature
new file mode 100644
index 0000000000..74aeab6d39
--- /dev/null
+++ b/changelog.d/4423.feature
@@ -0,0 +1 @@
+Config option to disable requesting MSISDN on registration.
diff --git a/changelog.d/4426.misc b/changelog.d/4426.misc
new file mode 100644
index 0000000000..cda50438e0
--- /dev/null
+++ b/changelog.d/4426.misc
@@ -0,0 +1 @@
+Remove redundant SynapseKeyClientProtocol magic
\ No newline at end of file
diff --git a/changelog.d/4427.misc b/changelog.d/4427.misc
new file mode 100644
index 0000000000..75500bdbc2
--- /dev/null
+++ b/changelog.d/4427.misc
@@ -0,0 +1 @@
+Refactor and cleanup for SRV record lookup
diff --git a/changelog.d/4428.misc b/changelog.d/4428.misc
new file mode 100644
index 0000000000..9a51434755
--- /dev/null
+++ b/changelog.d/4428.misc
@@ -0,0 +1 @@
+Move SRV logic into the Agent layer
diff --git a/changelog.d/4432.misc b/changelog.d/4432.misc
new file mode 100644
index 0000000000..047061ed3c
--- /dev/null
+++ b/changelog.d/4432.misc
@@ -0,0 +1 @@
+Apply a unique index to the user_ips table, preventing duplicates.
diff --git a/changelog.d/4433.misc b/changelog.d/4433.misc
new file mode 100644
index 0000000000..30f2912db2
--- /dev/null
+++ b/changelog.d/4433.misc
@@ -0,0 +1 @@
+debian package: symlink to explicit python version
diff --git a/changelog.d/4434.misc b/changelog.d/4434.misc
new file mode 100644
index 0000000000..047061ed3c
--- /dev/null
+++ b/changelog.d/4434.misc
@@ -0,0 +1 @@
+Apply a unique index to the user_ips table, preventing duplicates.
diff --git a/changelog.d/4435.bugfix b/changelog.d/4435.bugfix
new file mode 100644
index 0000000000..4ea9a5df02
--- /dev/null
+++ b/changelog.d/4435.bugfix
@@ -0,0 +1 @@
+Fix None guard in calling config.server.is_threepid_reserved
diff --git a/changelog.d/4437.misc b/changelog.d/4437.misc
new file mode 100644
index 0000000000..43f8963614
--- /dev/null
+++ b/changelog.d/4437.misc
@@ -0,0 +1 @@
+Add infrastructure to support different event formats
diff --git a/changelog.d/4444.misc b/changelog.d/4444.misc
new file mode 100644
index 0000000000..1be84188c6
--- /dev/null
+++ b/changelog.d/4444.misc
@@ -0,0 +1 @@
+Generate the debian config during build
diff --git a/changelog.d/4445.feature b/changelog.d/4445.feature
new file mode 100644
index 0000000000..a6f9b7bbac
--- /dev/null
+++ b/changelog.d/4445.feature
@@ -0,0 +1 @@
+Add a metric for tracking event stream position of the user directory.
\ No newline at end of file
diff --git a/changelog.d/4447.misc b/changelog.d/4447.misc
new file mode 100644
index 0000000000..43f8963614
--- /dev/null
+++ b/changelog.d/4447.misc
@@ -0,0 +1 @@
+Add infrastructure to support different event formats
diff --git a/changelog.d/4448.misc b/changelog.d/4448.misc
new file mode 100644
index 0000000000..43f8963614
--- /dev/null
+++ b/changelog.d/4448.misc
@@ -0,0 +1 @@
+Add infrastructure to support different event formats
diff --git a/changelog.d/4452.bugfix b/changelog.d/4452.bugfix
new file mode 100644
index 0000000000..a715ca3788
--- /dev/null
+++ b/changelog.d/4452.bugfix
@@ -0,0 +1 @@
+Don't send IP addresses as SNI
diff --git a/changelog.d/4458.misc b/changelog.d/4458.misc
new file mode 100644
index 0000000000..8b3bc94a34
--- /dev/null
+++ b/changelog.d/4458.misc
@@ -0,0 +1 @@
+Clarify documentation for the `public_baseurl` config param
diff --git a/changelog.d/4459.misc b/changelog.d/4459.misc
new file mode 100644
index 0000000000..58130b6190
--- /dev/null
+++ b/changelog.d/4459.misc
@@ -0,0 +1 @@
+Synapse will now take advantage of native UPSERT functionality in PostgreSQL 9.5+ and SQLite 3.24+.
diff --git a/changelog.d/4460.bugfix b/changelog.d/4460.bugfix
new file mode 100644
index 0000000000..8c5d5b4e0e
--- /dev/null
+++ b/changelog.d/4460.bugfix
@@ -0,0 +1 @@
+Fix UnboundLocalError in post_urlencoded_get_json
diff --git a/changelog.d/4461.bugfix b/changelog.d/4461.bugfix
new file mode 100644
index 0000000000..92062a2bfb
--- /dev/null
+++ b/changelog.d/4461.bugfix
@@ -0,0 +1 @@
+Add a timeout to filtered room directory queries.
\ No newline at end of file
diff --git a/changelog.d/4464.misc b/changelog.d/4464.misc
new file mode 100644
index 0000000000..9a51434755
--- /dev/null
+++ b/changelog.d/4464.misc
@@ -0,0 +1 @@
+Move SRV logic into the Agent layer
diff --git a/debian/build_virtualenv b/debian/build_virtualenv
index 83346c40f1..bead3ebc6e 100755
--- a/debian/build_virtualenv
+++ b/debian/build_virtualenv
@@ -6,7 +6,16 @@
 set -e
 
 export DH_VIRTUALENV_INSTALL_ROOT=/opt/venvs
-SNAKE=/usr/bin/python3
+
+# make sure that the virtualenv links to the specific version of python, by
+# dereferencing the python3 symlink.
+#
+# Otherwise, if somebody tries to install (say) the stretch package on buster,
+# they will get a confusing error about "No module named 'synapse'", because
+# python won't look in the right directory. At least this way, the error will
+# be a *bit* more obvious.
+#
+SNAKE=`readlink -e /usr/bin/python3`
 
 # try to set the CFLAGS so any compiled C extensions are compiled with the most
 # generic as possible x64 instructions, so that compiling it on a new Intel chip
@@ -36,6 +45,10 @@ dh_virtualenv \
     --extra-pip-arg="--compile" \
     --extras="all"
 
+PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
+VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
+TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"
+
 # we copy the tests to a temporary directory so that we can put them on the
 # PYTHONPATH without putting the uninstalled synapse on the pythonpath.
 tmpdir=`mktemp -d`
@@ -44,5 +57,35 @@ trap "rm -r $tmpdir" EXIT
 cp -r tests "$tmpdir"
 
 PYTHONPATH="$tmpdir" \
-    debian/matrix-synapse-py3/opt/venvs/matrix-synapse/bin/python \
-        -B -m twisted.trial --reporter=text -j2 tests
+    "${TARGET_PYTHON}" -B -m twisted.trial --reporter=text -j2 tests
+
+# build the config file
+"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_config" \
+        --config-dir="/etc/matrix-synapse" \
+        --data-dir="/var/lib/matrix-synapse" |
+    perl -pe '
+# tweak the paths to the tls certs and signing keys
+/^tls_.*_path:/ and s/SERVERNAME/homeserver/;
+/^signing_key_path:/ and s/SERVERNAME/homeserver/;
+
+# tweak the pid file location
+/^pid_file:/ and s#:.*#: "/var/run/matrix-synapse.pid"#;
+
+# tweak the path to the log config
+/^log_config:/ and s/SERVERNAME\.log\.config/log.yaml/;
+
+# tweak the path to the media store
+/^media_store_path:/ and s#/media_store#/media#;
+
+# remove the server_name setting, which is set in a separate file
+/^server_name:/ and $_ = "#\n# This is set in /etc/matrix-synapse/conf.d/server_name.yaml for Debian installations.\n# $_";
+
+# remove the report_stats setting, which is set in a separate file
+/^# report_stats:/ and $_ = "";
+
+' > "${PACKAGE_BUILD_DIR}/etc/matrix-synapse/homeserver.yaml"
+
+
+# add a dependency on the right version of python to substvars.
+PYPKG=`basename $SNAKE`
+echo "synapse:pydepends=$PYPKG" >> debian/matrix-synapse-py3.substvars
diff --git a/debian/control b/debian/control
index b85e36c6ca..4abfa02051 100644
--- a/debian/control
+++ b/debian/control
@@ -27,8 +27,8 @@ Depends:
  adduser,
  debconf,
  python3-distutils|libpython3-stdlib (<< 3.6),
- python3,
  ${misc:Depends},
+ ${synapse:pydepends},
 # some of our scripts use perl, but none of them are important,
 # so we put perl:Depends in Suggests rather than Depends.
 Suggests:
diff --git a/debian/homeserver.yaml b/debian/homeserver.yaml
deleted file mode 100644
index 188a2d5483..0000000000
--- a/debian/homeserver.yaml
+++ /dev/null
@@ -1,617 +0,0 @@
-# vim:ft=yaml
-# PEM encoded X509 certificate for TLS.
-# You can replace the self-signed certificate that synapse
-# autogenerates on launch with your own SSL certificate + key pair
-# if you like.  Any required intermediary certificates can be
-# appended after the primary certificate in hierarchical order.
-tls_certificate_path: "/etc/matrix-synapse/homeserver.tls.crt"
-
-# PEM encoded private key for TLS
-tls_private_key_path: "/etc/matrix-synapse/homeserver.tls.key"
-
-# PEM dh parameters for ephemeral keys
-tls_dh_params_path: "/etc/matrix-synapse/homeserver.tls.dh"
-
-# Don't bind to the https port
-no_tls: False
-
-# List of allowed TLS fingerprints for this server to publish along
-# with the signing keys for this server. Other matrix servers that
-# make HTTPS requests to this server will check that the TLS
-# certificates returned by this server match one of the fingerprints.
-#
-# Synapse automatically adds the fingerprint of its own certificate
-# to the list. So if federation traffic is handled directly by synapse
-# then no modification to the list is required.
-#
-# If synapse is run behind a load balancer that handles the TLS then it
-# will be necessary to add the fingerprints of the certificates used by
-# the loadbalancers to this list if they are different to the one
-# synapse is using.
-#
-# Homeservers are permitted to cache the list of TLS fingerprints
-# returned in the key responses up to the "valid_until_ts" returned in
-# key. It may be necessary to publish the fingerprints of a new
-# certificate and wait until the "valid_until_ts" of the previous key
-# responses have passed before deploying it.
-#
-# You can calculate a fingerprint from a given TLS listener via:
-# openssl s_client -connect $host:$port < /dev/null 2> /dev/null |
-#   openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
-# or by checking matrix.org/federationtester/api/report?server_name=$host
-#
-tls_fingerprints: []
-# tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
-
-
-## Server ##
-
-# When running as a daemon, the file to store the pid in
-pid_file: "/var/run/matrix-synapse.pid"
-
-# CPU affinity mask. Setting this restricts the CPUs on which the
-# process will be scheduled. It is represented as a bitmask, with the
-# lowest order bit corresponding to the first logical CPU and the
-# highest order bit corresponding to the last logical CPU. Not all CPUs
-# may exist on a given system but a mask may specify more CPUs than are
-# present.
-#
-# For example:
-#    0x00000001  is processor #0,
-#    0x00000003  is processors #0 and #1,
-#    0xFFFFFFFF  is all processors (#0 through #31).
-#
-# Pinning a Python process to a single CPU is desirable, because Python
-# is inherently single-threaded due to the GIL, and can suffer a
-# 30-40% slowdown due to cache blow-out and thread context switching
-# if the scheduler happens to schedule the underlying threads across
-# different cores. See
-# https://www.mirantis.com/blog/improve-performance-python-programs-restricting-single-cpu/.
-#
-# cpu_affinity: 0xFFFFFFFF
-
-# The path to the web client which will be served at /_matrix/client/
-# if 'webclient' is configured under the 'listeners' configuration.
-#
-# web_client_location: "/path/to/web/root"
-
-# The public-facing base URL for the client API (not including _matrix/...)
-# public_baseurl: https://example.com:8448/
-
-# Set the soft limit on the number of file descriptors synapse can use
-# Zero is used to indicate synapse should set the soft limit to the
-# hard limit.
-soft_file_limit: 0
-
-# The GC threshold parameters to pass to `gc.set_threshold`, if defined
-# gc_thresholds: [700, 10, 10]
-
-# Set the limit on the returned events in the timeline in the get
-# and sync operations. The default value is -1, means no upper limit.
-# filter_timeline_limit: 5000
-
-# Whether room invites to users on this server should be blocked
-# (except those sent by local server admins). The default is False.
-# block_non_admin_invites: True
-
-# Restrict federation to the following whitelist of domains.
-# N.B. we recommend also firewalling your federation listener to limit
-# inbound federation traffic as early as possible, rather than relying
-# purely on this application-layer restriction.  If not specified, the
-# default is to whitelist everything.
-#
-# federation_domain_whitelist:
-#  - lon.example.com
-#  - nyc.example.com
-#  - syd.example.com
-
-# List of ports that Synapse should listen on, their purpose and their
-# configuration.
-listeners:
-  # Main HTTPS listener
-  # For when matrix traffic is sent directly to synapse.
-  -
-    # The port to listen for HTTPS requests on.
-    port: 8448
-
-    # Local addresses to listen on.
-    # On Linux and Mac OS, `::` will listen on all IPv4 and IPv6
-    # addresses by default. For most other OSes, this will only listen
-    # on IPv6.
-    bind_addresses:
-      - '::'
-      - '0.0.0.0'
-
-    # This is a 'http' listener, allows us to specify 'resources'.
-    type: http
-
-    tls: true
-
-    # Use the X-Forwarded-For (XFF) header as the client IP and not the
-    # actual client IP.
-    x_forwarded: false
-
-    # List of HTTP resources to serve on this listener.
-    resources:
-      -
-        # List of resources to host on this listener.
-        names:
-          - client     # The client-server APIs, both v1 and v2
-          - webclient  # The bundled webclient.
-
-        # Should synapse compress HTTP responses to clients that support it?
-        # This should be disabled if running synapse behind a load balancer
-        # that can do automatic compression.
-        compress: true
-
-      - names: [federation]  # Federation APIs
-        compress: false
-
-    # optional list of additional endpoints which can be loaded via
-    # dynamic modules
-    # additional_resources:
-    #   "/_matrix/my/custom/endpoint":
-    #     module: my_module.CustomRequestHandler
-    #     config: {}
-
-  # Unsecure HTTP listener,
-  # For when matrix traffic passes through loadbalancer that unwraps TLS.
-  - port: 8008
-    tls: false
-    bind_addresses: ['::', '0.0.0.0']
-    type: http
-
-    x_forwarded: false
-
-    resources:
-      - names: [client, webclient]
-        compress: true
-      - names: [federation]
-        compress: false
-
-  # Turn on the twisted ssh manhole service on localhost on the given
-  # port.
-  # - port: 9000
-  #   bind_addresses: ['::1', '127.0.0.1']
-  #   type: manhole
-
-
-# Database configuration
-database:
-  # The database engine name
-  name: "sqlite3"
-  # Arguments to pass to the engine
-  args:
-    # Path to the database
-    database: "/var/lib/matrix-synapse/homeserver.db"
-
-# Number of events to cache in memory.
-event_cache_size: "10K"
-
-
-# A yaml python logging config file
-log_config: "/etc/matrix-synapse/log.yaml"
-
-
-
-## Ratelimiting ##
-
-# Number of messages a client can send per second
-rc_messages_per_second: 0.2
-
-# Number of message a client can send before being throttled
-rc_message_burst_count: 10.0
-
-# The federation window size in milliseconds
-federation_rc_window_size: 1000
-
-# The number of federation requests from a single server in a window
-# before the server will delay processing the request.
-federation_rc_sleep_limit: 10
-
-# The duration in milliseconds to delay processing events from
-# remote servers by if they go over the sleep limit.
-federation_rc_sleep_delay: 500
-
-# The maximum number of concurrent federation requests allowed
-# from a single server
-federation_rc_reject_limit: 50
-
-# The number of federation requests to concurrently process from a
-# single server
-federation_rc_concurrent: 3
-
-
-
-# Directory where uploaded images and attachments are stored.
-media_store_path: "/var/lib/matrix-synapse/media"
-
-# Media storage providers allow media to be stored in different
-# locations.
-# media_storage_providers:
-# - module: file_system
-#   # Whether to write new local files.
-#   store_local: false
-#   # Whether to write new remote media
-#   store_remote: false
-#   # Whether to block upload requests waiting for write to this
-#   # provider to complete
-#   store_synchronous: false
-#   config:
-#     directory: /mnt/some/other/directory
-
-# Directory where in-progress uploads are stored.
-uploads_path: "/var/lib/matrix-synapse/uploads"
-
-# The largest allowed upload size in bytes
-max_upload_size: "10M"
-
-# Maximum number of pixels that will be thumbnailed
-max_image_pixels: "32M"
-
-# Whether to generate new thumbnails on the fly to precisely match
-# the resolution requested by the client. If true then whenever
-# a new resolution is requested by the client the server will
-# generate a new thumbnail. If false the server will pick a thumbnail
-# from a precalculated list.
-dynamic_thumbnails: false
-
-# List of thumbnail to precalculate when an image is uploaded.
-thumbnail_sizes:
-- width: 32
-  height: 32
-  method: crop
-- width: 96
-  height: 96
-  method: crop
-- width: 320
-  height: 240
-  method: scale
-- width: 640
-  height: 480
-  method: scale
-- width: 800
-  height: 600
-  method: scale
-
-# Is the preview URL API enabled?  If enabled, you *must* specify
-# an explicit url_preview_ip_range_blacklist of IPs that the spider is
-# denied from accessing.
-url_preview_enabled: False
-
-# List of IP address CIDR ranges that the URL preview spider is denied
-# from accessing.  There are no defaults: you must explicitly
-# specify a list for URL previewing to work.  You should specify any
-# internal services in your network that you do not want synapse to try
-# to connect to, otherwise anyone in any Matrix room could cause your
-# synapse to issue arbitrary GET requests to your internal services,
-# causing serious security issues.
-#
-# url_preview_ip_range_blacklist:
-# - '127.0.0.0/8'
-# - '10.0.0.0/8'
-# - '172.16.0.0/12'
-# - '192.168.0.0/16'
-# - '100.64.0.0/10'
-# - '169.254.0.0/16'
-#
-# List of IP address CIDR ranges that the URL preview spider is allowed
-# to access even if they are specified in url_preview_ip_range_blacklist.
-# This is useful for specifying exceptions to wide-ranging blacklisted
-# target IP ranges - e.g. for enabling URL previews for a specific private
-# website only visible in your network.
-#
-# url_preview_ip_range_whitelist:
-# - '192.168.1.1'
-
-# Optional list of URL matches that the URL preview spider is
-# denied from accessing.  You should use url_preview_ip_range_blacklist
-# in preference to this, otherwise someone could define a public DNS
-# entry that points to a private IP address and circumvent the blacklist.
-# This is more useful if you know there is an entire shape of URL that
-# you know that will never want synapse to try to spider.
-#
-# Each list entry is a dictionary of url component attributes as returned
-# by urlparse.urlsplit as applied to the absolute form of the URL.  See
-# https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit
-# The values of the dictionary are treated as an filename match pattern
-# applied to that component of URLs, unless they start with a ^ in which
-# case they are treated as a regular expression match.  If all the
-# specified component matches for a given list item succeed, the URL is
-# blacklisted.
-#
-# url_preview_url_blacklist:
-# # blacklist any URL with a username in its URI
-# - username: '*'
-#
-# # blacklist all *.google.com URLs
-# - netloc: 'google.com'
-# - netloc: '*.google.com'
-#
-# # blacklist all plain HTTP URLs
-# - scheme: 'http'
-#
-# # blacklist http(s)://www.acme.com/foo
-# - netloc: 'www.acme.com'
-#   path: '/foo'
-#
-# # blacklist any URL with a literal IPv4 address
-# - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'
-
-# The largest allowed URL preview spidering size in bytes
-max_spider_size: "10M"
-
-
-
-
-## Captcha ##
-# See docs/CAPTCHA_SETUP for full details of configuring this.
-
-# This Home Server's ReCAPTCHA public key.
-recaptcha_public_key: "YOUR_PUBLIC_KEY"
-
-# This Home Server's ReCAPTCHA private key.
-recaptcha_private_key: "YOUR_PRIVATE_KEY"
-
-# Enables ReCaptcha checks when registering, preventing signup
-# unless a captcha is answered. Requires a valid ReCaptcha
-# public/private key.
-enable_registration_captcha: False
-
-# A secret key used to bypass the captcha test entirely.
-#captcha_bypass_secret: "YOUR_SECRET_HERE"
-
-# The API endpoint to use for verifying m.login.recaptcha responses.
-recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
-
-
-## Turn ##
-
-# The public URIs of the TURN server to give to clients
-turn_uris: []
-
-# The shared secret used to compute passwords for the TURN server
-turn_shared_secret: "YOUR_SHARED_SECRET"
-
-# The Username and password if the TURN server needs them and
-# does not use a token
-#turn_username: "TURNSERVER_USERNAME"
-#turn_password: "TURNSERVER_PASSWORD"
-
-# How long generated TURN credentials last
-turn_user_lifetime: "1h"
-
-# Whether guests should be allowed to use the TURN server.
-# This defaults to True, otherwise VoIP will be unreliable for guests.
-# However, it does introduce a slight security risk as it allows users to
-# connect to arbitrary endpoints without having first signed up for a
-# valid account (e.g. by passing a CAPTCHA).
-turn_allow_guests: False
-
-
-## Registration ##
-
-# Enable registration for new users.
-enable_registration: False
-
-# The user must provide all of the below types of 3PID when registering.
-#
-# registrations_require_3pid:
-#     - email
-#     - msisdn
-
-# Mandate that users are only allowed to associate certain formats of
-# 3PIDs with accounts on this server.
-#
-# allowed_local_3pids:
-#     - medium: email
-#       pattern: ".*@matrix\.org"
-#     - medium: email
-#       pattern: ".*@vector\.im"
-#     - medium: msisdn
-#       pattern: "\+44"
-
-# If set, allows registration by anyone who also has the shared
-# secret, even if registration is otherwise disabled.
-# registration_shared_secret: <PRIVATE STRING>
-
-# Set the number of bcrypt rounds used to generate password hash.
-# Larger numbers increase the work factor needed to generate the hash.
-# The default number is 12 (which equates to 2^12 rounds).
-# N.B. that increasing this will exponentially increase the time required
-# to register or login - e.g. 24 => 2^24 rounds which will take >20 mins.
-bcrypt_rounds: 12
-
-# Allows users to register as guests without a password/email/etc, and
-# participate in rooms hosted on this server which have been made
-# accessible to anonymous users.
-allow_guest_access: False
-
-# The list of identity servers trusted to verify third party
-# identifiers by this server.
-trusted_third_party_id_servers:
-    - matrix.org
-    - vector.im
-    - riot.im
-
-# Users who register on this homeserver will automatically be joined
-# to these rooms
-#auto_join_rooms:
-#    - "#example:example.com"
-
-
-## Metrics ###
-
-# Enable collection and rendering of performance metrics
-enable_metrics: False
-
-## API Configuration ##
-
-# A list of event types that will be included in the room_invite_state
-room_invite_state_types:
-    - "m.room.join_rules"
-    - "m.room.canonical_alias"
-    - "m.room.avatar"
-    - "m.room.name"
-
-
-# A list of application service config file to use
-app_service_config_files: []
-
-
-# macaroon_secret_key: <PRIVATE STRING>
-
-# Used to enable access token expiration.
-expire_access_token: False
-
-## Signing Keys ##
-
-# Path to the signing key to sign messages with
-signing_key_path: "/etc/matrix-synapse/homeserver.signing.key"
-
-# The keys that the server used to sign messages with but won't use
-# to sign new messages. E.g. it has lost its private key
-old_signing_keys: {}
-#  "ed25519:auto":
-#    # Base64 encoded public key
-#    key: "The public part of your old signing key."
-#    # Millisecond POSIX timestamp when the key expired.
-#    expired_ts: 123456789123
-
-# How long key response published by this server is valid for.
-# Used to set the valid_until_ts in /key/v2 APIs.
-# Determines how quickly servers will query to check which keys
-# are still valid.
-key_refresh_interval: "1d" # 1 Day.
-
-# The trusted servers to download signing keys from.
-perspectives:
-  servers:
-    "matrix.org":
-      verify_keys:
-        "ed25519:auto":
-          key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
-
-
-
-# Enable SAML2 for registration and login. Uses pysaml2
-# config_path:      Path to the sp_conf.py configuration file
-# idp_redirect_url: Identity provider URL which will redirect
-#                   the user back to /login/saml2 with proper info.
-# See pysaml2 docs for format of config.
-#saml2_config:
-#   enabled: true
-#   config_path: "/home/erikj/git/synapse/sp_conf.py"
-#   idp_redirect_url: "http://test/idp"
-
-
-
-# Enable CAS for registration and login.
-#cas_config:
-#   enabled: true
-#   server_url: "https://cas-server.com"
-#   service_url: "https://homeserver.domain.com:8448"
-#   #required_attributes:
-#   #    name: value
-
-
-# The JWT needs to contain a globally unique "sub" (subject) claim.
-#
-# jwt_config:
-#    enabled: true
-#    secret: "a secret"
-#    algorithm: "HS256"
-
-
-
-# Enable password for login.
-password_config:
-   enabled: true
-   # Uncomment and change to a secret random string for extra security.
-   # DO NOT CHANGE THIS AFTER INITIAL SETUP!
-   #pepper: ""
-
-
-
-# Enable sending emails for notification events
-# Defining a custom URL for Riot is only needed if email notifications
-# should contain links to a self-hosted installation of Riot; when set
-# the "app_name" setting is ignored.
-#
-# If your SMTP server requires authentication, the optional smtp_user &
-# smtp_pass variables should be used
-#
-#email:
-#   enable_notifs: false
-#   smtp_host: "localhost"
-#   smtp_port: 25
-#   smtp_user: "exampleusername"
-#   smtp_pass: "examplepassword"
-#   require_transport_security: False
-#   notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
-#   app_name: Matrix
-#   template_dir: res/templates
-#   notif_template_html: notif_mail.html
-#   notif_template_text: notif_mail.txt
-#   notif_for_new_users: True
-#   riot_base_url: "http://localhost/riot"
-
-
-# password_providers:
-#     - module: "ldap_auth_provider.LdapAuthProvider"
-#       config:
-#         enabled: true
-#         uri: "ldap://ldap.example.com:389"
-#         start_tls: true
-#         base: "ou=users,dc=example,dc=com"
-#         attributes:
-#            uid: "cn"
-#            mail: "email"
-#            name: "givenName"
-#         #bind_dn:
-#         #bind_password:
-#         #filter: "(objectClass=posixAccount)"
-
-
-
-# Clients requesting push notifications can either have the body of
-# the message sent in the notification poke along with other details
-# like the sender, or just the event ID and room ID (`event_id_only`).
-# If clients choose the former, this option controls whether the
-# notification request includes the content of the event (other details
-# like the sender are still included). For `event_id_only` push, it
-# has no effect.
-
-# For modern android devices the notification content will still appear
-# because it is loaded by the app. iPhone, however will send a
-# notification saying only that a message arrived and who it came from.
-#
-#push:
-#   include_content: true
-
-
-# spam_checker:
-#     module: "my_custom_project.SuperSpamChecker"
-#     config:
-#         example_option: 'things'
-
-
-# Whether to allow non server admins to create groups on this server
-enable_group_creation: false
-
-# If enabled, non server admins can only create groups with local parts
-# starting with this prefix
-# group_creation_prefix: "unofficial/"
-
-
-
-# User Directory configuration
-#
-# 'search_all_users' defines whether to search all users visible to your HS
-# when searching the user directory, rather than limiting to users visible
-# in public rooms.  Defaults to false.  If you set it True, you'll have to run
-# UPDATE user_directory_stream_pos SET stream_id = NULL;
-# on your database to tell it to rebuild the user_directory search indexes.
-#
-#user_directory:
-#   search_all_users: false
diff --git a/debian/install b/debian/install
index d1f42c8be6..3d916a9718 100644
--- a/debian/install
+++ b/debian/install
@@ -1,2 +1 @@
-debian/homeserver.yaml etc/matrix-synapse
 debian/log.yaml etc/matrix-synapse
diff --git a/demo/demo.tls.dh b/demo/demo.tls.dh
deleted file mode 100644
index cbc58272a0..0000000000
--- a/demo/demo.tls.dh
+++ /dev/null
@@ -1,9 +0,0 @@
-2048-bit DH parameters taken from rfc3526
------BEGIN DH PARAMETERS-----
-MIIBCAKCAQEA///////////JD9qiIWjCNMTGYouA3BzRKQJOCIpnzHQCC76mOxOb
-IlFKCHmONATd75UZs806QxswKwpt8l8UN0/hNW1tUcJF5IW1dmJefsb0TELppjft
-awv/XLb0Brft7jhr+1qJn6WunyQRfEsf5kkoZlHs5Fs9wgB8uKFjvwWY2kg2HFXT
-mmkWP6j9JM9fg2VdI9yjrZYcYvNWIIVSu57VKQdwlpZtZww1Tkq8mATxdGwIyhgh
-fDKQXkYuNs474553LBgOhgObJ4Oi7Aeij7XFXfBvTFLJ3ivL9pVYFxg5lUl86pVq
-5RXSJhiY+gUQFXKOWoqsqmj//////////wIBAg==
------END DH PARAMETERS-----
diff --git a/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml
index c2b8576a32..529118d184 100644
--- a/docker/conf/homeserver.yaml
+++ b/docker/conf/homeserver.yaml
@@ -4,7 +4,6 @@
 
 tls_certificate_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.crt"
 tls_private_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.key"
-tls_dh_params_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.dh"
 no_tls: {{ "True" if SYNAPSE_NO_TLS else "False" }}
 tls_fingerprints: []
 
diff --git a/scripts-dev/build_debian_packages b/scripts-dev/build_debian_packages
index 577d93e6f6..6b9be99060 100755
--- a/scripts-dev/build_debian_packages
+++ b/scripts-dev/build_debian_packages
@@ -10,12 +10,12 @@
 # can be passed on the commandline for debugging.
 
 import argparse
-from concurrent.futures import ThreadPoolExecutor
 import os
 import signal
 import subprocess
 import sys
 import threading
+from concurrent.futures import ThreadPoolExecutor
 
 DISTS = (
     "debian:stretch",
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index ba1019b9b2..e37b807c94 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -819,7 +819,9 @@ class Auth(object):
             elif threepid:
                 # If the user does not exist yet, but is signing up with a
                 # reserved threepid then pass auth check
-                if is_threepid_reserved(self.hs.config, threepid):
+                if is_threepid_reserved(
+                    self.hs.config.mau_limits_reserved_threepids, threepid
+                ):
                     return
             # Else if there is no room in the MAU bucket, bail
             current_mau = yield self.store.get_monthly_active_count()
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 022f772714..51ee078bc3 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -68,6 +68,7 @@ class EventTypes(object):
     Aliases = "m.room.aliases"
     Redaction = "m.room.redaction"
     ThirdPartyInvite = "m.room.third_party_invite"
+    Encryption = "m.room.encryption"
 
     RoomHistoryVisibility = "m.room.history_visibility"
     CanonicalAlias = "m.room.canonical_alias"
@@ -119,6 +120,19 @@ KNOWN_ROOM_VERSIONS = {
     RoomVersions.STATE_V2_TEST,
 }
 
+
+class EventFormatVersions(object):
+    """This is an internal enum for tracking the version of the event format,
+    independently from the room version.
+    """
+    V1 = 1
+
+
+KNOWN_EVENT_FORMAT_VERSIONS = {
+    EventFormatVersions.V1,
+}
+
+
 ServerNoticeMsgType = "m.server_notice"
 ServerNoticeLimitReached = "m.server_notice.usage_limit_reached"
 
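[Aside: the new EventFormatVersions enum decouples an event's wire format from the room version. A hedged sketch of how such a registry is typically consulted; the guard function below is illustrative only, not Synapse's actual deserialisation path:

class EventFormatVersions(object):
    """Internal enum of supported event wire formats (as added above)."""
    V1 = 1

KNOWN_EVENT_FORMAT_VERSIONS = {EventFormatVersions.V1}

def check_event_format_version(format_version):
    # Reject events in a wire format this server does not understand.
    if format_version not in KNOWN_EVENT_FORMAT_VERSIONS:
        raise ValueError("Unknown event format version: %r" % (format_version,))

check_event_format_version(EventFormatVersions.V1)  # passes
]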
diff --git a/synapse/api/urls.py b/synapse/api/urls.py
index f78695b657..8102176653 100644
--- a/synapse/api/urls.py
+++ b/synapse/api/urls.py
@@ -24,7 +24,9 @@ from synapse.config import ConfigError
 
 CLIENT_PREFIX = "/_matrix/client/api/v1"
 CLIENT_V2_ALPHA_PREFIX = "/_matrix/client/v2_alpha"
-FEDERATION_PREFIX = "/_matrix/federation/v1"
+FEDERATION_PREFIX = "/_matrix/federation"
+FEDERATION_V1_PREFIX = FEDERATION_PREFIX + "/v1"
+FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2"
 STATIC_PREFIX = "/_matrix/static"
 WEB_CLIENT_PREFIX = "/_matrix/client"
 CONTENT_REPO_PREFIX = "/_matrix/content"
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index f3ac3d19f0..ffc49d77cc 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -13,10 +13,12 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 import gc
 import logging
 import os
 import sys
+import traceback
 
 from six import iteritems
 
@@ -324,17 +326,12 @@ def setup(config_options):
 
     events.USE_FROZEN_DICTS = config.use_frozen_dicts
 
-    tls_server_context_factory = context_factory.ServerContextFactory(config)
-    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
-
     database_engine = create_engine(config.database_config)
     config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection
 
     hs = SynapseHomeServer(
         config.server_name,
         db_config=config.database_config,
-        tls_server_context_factory=tls_server_context_factory,
-        tls_client_options_factory=tls_client_options_factory,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
         database_engine=database_engine,
@@ -361,12 +358,53 @@ def setup(config_options):
     logger.info("Database prepared in %s.", config.database_config['name'])
 
     hs.setup()
-    hs.start_listening()
 
+    @defer.inlineCallbacks
     def start():
-        hs.get_pusherpool().start()
-        hs.get_datastore().start_profiling()
-        hs.get_datastore().start_doing_background_updates()
+        try:
+            # Check if the certificate is still valid.
+            cert_days_remaining = hs.config.is_disk_cert_valid()
+
+            if hs.config.acme_enabled:
+                # If ACME is enabled, we might need to provision a certificate
+                # before starting.
+                acme = hs.get_acme_handler()
+
+                # Start up the webservices which we will respond to ACME
+                # challenges with.
+                yield acme.start_listening()
+
+                # We want to reprovision if cert_days_remaining is None (meaning no
+                # certificate exists), or the days remaining number it returns
+                # is less than our re-registration threshold.
+                if (cert_days_remaining is None) or (
+                    not cert_days_remaining > hs.config.acme_reprovision_threshold
+                ):
+                    yield acme.provision_certificate()
+
+            # Read the certificate from disk and build the context factories for
+            # TLS.
+            hs.config.read_certificate_from_disk()
+            hs.tls_server_context_factory = context_factory.ServerContextFactory(config)
+            hs.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
+                config
+            )
+
+            # It is now safe to start your Synapse.
+            hs.start_listening()
+            hs.get_pusherpool().start()
+            hs.get_datastore().start_profiling()
+            hs.get_datastore().start_doing_background_updates()
+        except Exception as e:
+            # If a DeferredList failed (like in listening on the ACME listener),
+            # we need to print the subfailure explicitly.
+            if isinstance(e, defer.FirstError):
+                e.subFailure.printTraceback(sys.stderr)
+                sys.exit(1)
+
+            # Something else went wrong when starting. Print it and bail out.
+            traceback.print_exc(file=sys.stderr)
+            sys.exit(1)
 
     reactor.callWhenRunning(start)
 
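[Aside: the start() change above defers listener startup until a usable certificate exists, reprovisioning via ACME when no certificate is on disk or when its remaining validity is at or below acme_reprovision_threshold (30 days by default, per synapse/config/tls.py later in this diff). The decision reduces to the following; this is a standalone restatement of the condition in the hunk, not a new API:

def should_reprovision(cert_days_remaining, reprovision_threshold=30):
    # None means is_disk_cert_valid() found no certificate on disk at all.
    if cert_days_remaining is None:
        return True
    # "not days > threshold" in the diff is equivalent to days <= threshold.
    return cert_days_remaining <= reprovision_threshold

assert should_reprovision(None)      # no certificate yet: provision one
assert should_reprovision(30)        # at the threshold: renew
assert not should_reprovision(31)    # still comfortably valid
]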
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index fd2d6d52ef..5858fb92b4 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -367,7 +367,7 @@ class Config(object):
         if not keys_directory:
             keys_directory = os.path.dirname(config_files[-1])
 
-        config_dir_path = os.path.abspath(keys_directory)
+        self.config_dir_path = os.path.abspath(keys_directory)
 
         specified_config = {}
         for config_file in config_files:
@@ -379,7 +379,7 @@ class Config(object):
 
         server_name = specified_config["server_name"]
         config_string = self.generate_config(
-            config_dir_path=config_dir_path,
+            config_dir_path=self.config_dir_path,
             data_dir_path=os.getcwd(),
             server_name=server_name,
             generate_secrets=False,
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index 6c2b543b8c..fe520d6855 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -50,6 +50,10 @@ class RegistrationConfig(Config):
                 raise ConfigError('Invalid auto_join_rooms entry %s' % (room_alias,))
         self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True)
 
+        self.disable_msisdn_registration = (
+            config.get("disable_msisdn_registration", False)
+        )
+
     def default_config(self, generate_secrets=False, **kwargs):
         if generate_secrets:
             registration_shared_secret = 'registration_shared_secret: "%s"' % (
@@ -70,6 +74,11 @@ class RegistrationConfig(Config):
         #     - email
         #     - msisdn
 
+        # Explicitly disable asking for MSISDNs from the registration
+        # flow (overrides registrations_require_3pid if MSISDNs are set as required)
+        #
+        # disable_msisdn_registration: True
+
         # Mandate that users are only allowed to associate certain formats of
         # 3PIDs with accounts on this server.
         #
diff --git a/synapse/config/server.py b/synapse/config/server.py
index fb57791098..22dcc87d8a 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -256,7 +256,11 @@ class ServerConfig(Config):
         #
         # web_client_location: "/path/to/web/root"
 
-        # The public-facing base URL for the client API (not including _matrix/...)
+        # The public-facing base URL that clients use to access this HS
+        # (not including _matrix/...). This is the same URL a user would
+        # enter into the 'custom HS URL' field on their client. If you
+        # use synapse with a reverse proxy, this should be the URL to reach
+        # synapse via the proxy.
         # public_baseurl: https://example.com:8448/
 
         # Set the soft limit on the number of file descriptors synapse can use
@@ -420,19 +424,18 @@ class ServerConfig(Config):
                                   " service on the given port.")
 
 
-def is_threepid_reserved(config, threepid):
+def is_threepid_reserved(reserved_threepids, threepid):
     """Check the threepid against the reserved threepid config
     Args:
-        config(ServerConfig) - to access server config attributes
+        reserved_threepids([dict]) - list of reserved threepids
         threepid(dict) - The threepid to test for
 
     Returns:
         boolean Is the threepid undertest reserved_user
     """
 
-    for tp in config.mau_limits_reserved_threepids:
-        if (threepid['medium'] == tp['medium']
-                and threepid['address'] == tp['address']):
+    for tp in reserved_threepids:
+        if (threepid['medium'] == tp['medium'] and threepid['address'] == tp['address']):
             return True
     return False
 
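[Aside: with the config object factored out, is_threepid_reserved is a pure function over the reserved list, so call sites pass hs.config.mau_limits_reserved_threepids directly (as in the synapse/api/auth.py hunk above) and tests need no config fixture. A usage sketch with made-up values, assuming synapse is importable:

from synapse.config.server import is_threepid_reserved

reserved = [{"medium": "email", "address": "admin@example.com"}]

# Matches only when both medium and address are equal:
assert is_threepid_reserved(
    reserved, {"medium": "email", "address": "admin@example.com"}
)
assert not is_threepid_reserved(
    reserved, {"medium": "email", "address": "someone-else@example.com"}
)
]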
diff --git a/synapse/config/tls.py b/synapse/config/tls.py
index fef1ea99cb..a75e233aa0 100644
--- a/synapse/config/tls.py
+++ b/synapse/config/tls.py
@@ -13,68 +13,110 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
 import os
-import subprocess
+from datetime import datetime
 from hashlib import sha256
 
 from unpaddedbase64 import encode_base64
 
 from OpenSSL import crypto
 
-from ._base import Config
+from synapse.config._base import Config
 
-GENERATE_DH_PARAMS = False
+logger = logging.getLogger()
 
 
 class TlsConfig(Config):
     def read_config(self, config):
-        self.tls_certificate = self.read_tls_certificate(
-            config.get("tls_certificate_path")
-        )
-        self.tls_certificate_file = config.get("tls_certificate_path")
 
+        acme_config = config.get("acme", {})
+        self.acme_enabled = acme_config.get("enabled", False)
+        self.acme_url = acme_config.get(
+            "url", "https://acme-v01.api.letsencrypt.org/directory"
+        )
+        self.acme_port = acme_config.get("port", 8449)
+        self.acme_bind_addresses = acme_config.get("bind_addresses", ["127.0.0.1"])
+        self.acme_reprovision_threshold = acme_config.get("reprovision_threshold", 30)
+
+        self.tls_certificate_file = os.path.abspath(config.get("tls_certificate_path"))
+        self.tls_private_key_file = os.path.abspath(config.get("tls_private_key_path"))
+        self._original_tls_fingerprints = config["tls_fingerprints"]
+        self.tls_fingerprints = list(self._original_tls_fingerprints)
         self.no_tls = config.get("no_tls", False)
 
-        if self.no_tls:
-            self.tls_private_key = None
-        else:
-            self.tls_private_key = self.read_tls_private_key(
-                config.get("tls_private_key_path")
-            )
+        # This config option applies to non-federation HTTP clients
+        # (e.g. for talking to recaptcha, identity servers, and such)
+        # It should never be used in production, and is intended for
+        # use only when running tests.
+        self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get(
+            "use_insecure_ssl_client_just_for_testing_do_not_use"
+        )
 
-        self.tls_dh_params_path = self.check_file(
-            config.get("tls_dh_params_path"), "tls_dh_params"
+        self.tls_certificate = None
+        self.tls_private_key = None
+
+    def is_disk_cert_valid(self):
+        """
+        Is the certificate we have on disk valid, and if so, for how long?
+
+        Returns:
+            int: Days remaining of certificate validity.
+            None: No certificate exists.
+        """
+        if not os.path.exists(self.tls_certificate_file):
+            return None
+
+        try:
+            with open(self.tls_certificate_file, 'rb') as f:
+                cert_pem = f.read()
+        except Exception:
+            logger.exception("Failed to read existing certificate off disk!")
+            raise
+
+        try:
+            tls_certificate = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)
+        except Exception:
+            logger.exception("Failed to parse existing certificate off disk!")
+            raise
+
+        # YYYYMMDDhhmmssZ -- in UTC
+        expires_on = datetime.strptime(
+            tls_certificate.get_notAfter().decode('ascii'), "%Y%m%d%H%M%SZ"
         )
+        now = datetime.utcnow()
+        days_remaining = (expires_on - now).days
+        return days_remaining
+
+    def read_certificate_from_disk(self):
+        """
+        Read the certificates from disk.
+        """
+        self.tls_certificate = self.read_tls_certificate(self.tls_certificate_file)
+
+        if not self.no_tls:
+            self.tls_private_key = self.read_tls_private_key(self.tls_private_key_file)
 
-        self.tls_fingerprints = config["tls_fingerprints"]
+        self.tls_fingerprints = list(self._original_tls_fingerprints)
 
         # Check that our own certificate is included in the list of fingerprints
         # and include it if it is not.
         x509_certificate_bytes = crypto.dump_certificate(
-            crypto.FILETYPE_ASN1,
-            self.tls_certificate
+            crypto.FILETYPE_ASN1, self.tls_certificate
         )
         sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
         sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints)
         if sha256_fingerprint not in sha256_fingerprints:
             self.tls_fingerprints.append({u"sha256": sha256_fingerprint})
 
-        # This config option applies to non-federation HTTP clients
-        # (e.g. for talking to recaptcha, identity servers, and such)
-        # It should never be used in production, and is intended for
-        # use only when running tests.
-        self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get(
-            "use_insecure_ssl_client_just_for_testing_do_not_use"
-        )
-
     def default_config(self, config_dir_path, server_name, **kwargs):
         base_key_name = os.path.join(config_dir_path, server_name)
 
         tls_certificate_path = base_key_name + ".tls.crt"
         tls_private_key_path = base_key_name + ".tls.key"
-        tls_dh_params_path = base_key_name + ".tls.dh"
 
-        return """\
+        return (
+            """\
         # PEM encoded X509 certificate for TLS.
         # You can replace the self-signed certificate that synapse
         # autogenerates on launch with your own SSL certificate + key pair
@@ -85,9 +127,6 @@ class TlsConfig(Config):
         # PEM encoded private key for TLS
         tls_private_key_path: "%(tls_private_key_path)s"
 
-        # PEM dh parameters for ephemeral keys
-        tls_dh_params_path: "%(tls_dh_params_path)s"
-
         # Don't bind to the https port
         no_tls: False
 
@@ -118,7 +157,24 @@ class TlsConfig(Config):
         #
         tls_fingerprints: []
         # tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
-        """ % locals()
+
+        ## Support for ACME certificate auto-provisioning.
+        # acme:
+        #    enabled: false
+        ##   ACME path.
+        ##   If you only want to test, use the staging url:
+        ##   https://acme-staging.api.letsencrypt.org/directory
+        #    url: 'https://acme-v01.api.letsencrypt.org/directory'
+        ##   Port number (to listen for the HTTP-01 challenge).
+        ##   Using port 80 requires utilising something like authbind, or proxying to it.
+        #    port: 8449
+        ##   Hosts to bind to.
+        #    bind_addresses: ['127.0.0.1']
+        ##   How many days remaining on a certificate before it is renewed.
+        #    reprovision_threshold: 30
+        """
+            % locals()
+        )
 
     def read_tls_certificate(self, cert_path):
         cert_pem = self.read_file(cert_path, "tls_certificate")
@@ -131,7 +187,6 @@ class TlsConfig(Config):
     def generate_files(self, config):
         tls_certificate_path = config["tls_certificate_path"]
         tls_private_key_path = config["tls_private_key_path"]
-        tls_dh_params_path = config["tls_dh_params_path"]
 
         if not self.path_exists(tls_private_key_path):
             with open(tls_private_key_path, "wb") as private_key_file:
@@ -165,31 +220,3 @@ class TlsConfig(Config):
                 cert_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
 
                 certificate_file.write(cert_pem)
-
-        if not self.path_exists(tls_dh_params_path):
-            if GENERATE_DH_PARAMS:
-                subprocess.check_call([
-                    "openssl", "dhparam",
-                    "-outform", "PEM",
-                    "-out", tls_dh_params_path,
-                    "2048"
-                ])
-            else:
-                with open(tls_dh_params_path, "w") as dh_params_file:
-                    dh_params_file.write(
-                        "2048-bit DH parameters taken from rfc3526\n"
-                        "-----BEGIN DH PARAMETERS-----\n"
-                        "MIIBCAKCAQEA///////////JD9qiIWjC"
-                        "NMTGYouA3BzRKQJOCIpnzHQCC76mOxOb\n"
-                        "IlFKCHmONATd75UZs806QxswKwpt8l8U"
-                        "N0/hNW1tUcJF5IW1dmJefsb0TELppjft\n"
-                        "awv/XLb0Brft7jhr+1qJn6WunyQRfEsf"
-                        "5kkoZlHs5Fs9wgB8uKFjvwWY2kg2HFXT\n"
-                        "mmkWP6j9JM9fg2VdI9yjrZYcYvNWIIVS"
-                        "u57VKQdwlpZtZww1Tkq8mATxdGwIyhgh\n"
-                        "fDKQXkYuNs474553LBgOhgObJ4Oi7Aei"
-                        "j7XFXfBvTFLJ3ivL9pVYFxg5lUl86pVq\n"
-                        "5RXSJhiY+gUQFXKOWoqsqmj/////////"
-                        "/wIBAg==\n"
-                        "-----END DH PARAMETERS-----\n"
-                    )
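
[Aside: is_disk_cert_valid() above reads the certificate's notAfter field, an ASN.1 timestamp of the form YYYYMMDDhhmmssZ in UTC, via pyOpenSSL. A self-contained version of the same calculation; the path in the usage comment is illustrative:

from datetime import datetime

from OpenSSL import crypto

def days_remaining(cert_path):
    # Parse the PEM certificate and return days until it expires
    # (negative if it has already expired).
    with open(cert_path, "rb") as f:
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())
    expires_on = datetime.strptime(
        cert.get_notAfter().decode("ascii"), "%Y%m%d%H%M%SZ"
    )
    return (expires_on - datetime.utcnow()).days

# e.g. days_remaining("/etc/matrix-synapse/homeserver.tls.crt")
]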
diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py
index 02b76dfcfb..286ad80100 100644
--- a/synapse/crypto/context_factory.py
+++ b/synapse/crypto/context_factory.py
@@ -17,6 +17,7 @@ from zope.interface import implementer
 
 from OpenSSL import SSL, crypto
 from twisted.internet._sslverify import _defaultCurveName
+from twisted.internet.abstract import isIPAddress, isIPv6Address
 from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
 from twisted.internet.ssl import CertificateOptions, ContextFactory
 from twisted.python.failure import Failure
@@ -46,8 +47,10 @@ class ServerContextFactory(ContextFactory):
         if not config.no_tls:
             context.use_privatekey(config.tls_private_key)
 
-        context.load_tmp_dh(config.tls_dh_params_path)
-        context.set_cipher_list("!ADH:HIGH+kEDH:!AECDH:HIGH+kEECDH")
+        # https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
+        context.set_cipher_list(
+            "ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES256:ECDH+AES128:!aNULL:!SHA1"
+        )
 
     def getContext(self):
         return self._context
@@ -96,8 +99,14 @@ class ClientTLSOptions(object):
 
     def __init__(self, hostname, ctx):
         self._ctx = ctx
-        self._hostname = hostname
-        self._hostnameBytes = _idnaBytes(hostname)
+
+        if isIPAddress(hostname) or isIPv6Address(hostname):
+            self._hostnameBytes = hostname.encode('ascii')
+            self._sendSNI = False
+        else:
+            self._hostnameBytes = _idnaBytes(hostname)
+            self._sendSNI = True
+
         ctx.set_info_callback(
             _tolerateErrors(self._identityVerifyingInfoCallback)
         )
@@ -109,7 +118,9 @@ class ClientTLSOptions(object):
         return connection
 
     def _identityVerifyingInfoCallback(self, connection, where, ret):
-        if where & SSL.SSL_CB_HANDSHAKE_START:
+        # Literal IPv4 and IPv6 addresses are not permitted
+        # as host names according to the RFCs
+        if where & SSL.SSL_CB_HANDSHAKE_START and self._sendSNI:
             connection.set_tlsext_host_name(self._hostnameBytes)
 
 
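A minimal sketch of the SNI rule the new ClientTLSOptions encodes, using the same Twisted helpers (the function name is illustrative only):

    from twisted.internet.abstract import isIPAddress, isIPv6Address

    def should_send_sni(hostname):
        # RFC 6066 forbids literal IPv4/IPv6 addresses in the SNI
        # server_name extension, so it is only sent for real DNS names.
        return not (isIPAddress(hostname) or isIPv6Address(hostname))

    assert should_send_sni("matrix.org")
    assert not should_send_sni("10.0.0.1")
    assert not should_send_sni("::1")
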
diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py
deleted file mode 100644
index d40e4b8591..0000000000
--- a/synapse/crypto/keyclient.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-
-from six.moves import urllib
-
-from canonicaljson import json
-
-from twisted.internet import defer, reactor
-from twisted.internet.error import ConnectError
-from twisted.internet.protocol import Factory
-from twisted.names.error import DomainError
-from twisted.web.http import HTTPClient
-
-from synapse.http.endpoint import matrix_federation_endpoint
-from synapse.util import logcontext
-
-logger = logging.getLogger(__name__)
-
-KEY_API_V2 = "/_matrix/key/v2/server/%s"
-
-
-@defer.inlineCallbacks
-def fetch_server_key(server_name, tls_client_options_factory, key_id):
-    """Fetch the keys for a remote server."""
-
-    factory = SynapseKeyClientFactory()
-    factory.path = KEY_API_V2 % (urllib.parse.quote(key_id), )
-    factory.host = server_name
-    endpoint = matrix_federation_endpoint(
-        reactor, server_name, tls_client_options_factory, timeout=30
-    )
-
-    for i in range(5):
-        try:
-            with logcontext.PreserveLoggingContext():
-                protocol = yield endpoint.connect(factory)
-                server_response, server_certificate = yield protocol.remote_key
-                defer.returnValue((server_response, server_certificate))
-        except SynapseKeyClientError as e:
-            logger.warn("Error getting key for %r: %s", server_name, e)
-            if e.status.startswith(b"4"):
-                # Don't retry for 4xx responses.
-                raise IOError("Cannot get key for %r" % server_name)
-        except (ConnectError, DomainError) as e:
-            logger.warn("Error getting key for %r: %s", server_name, e)
-        except Exception:
-            logger.exception("Error getting key for %r", server_name)
-    raise IOError("Cannot get key for %r" % server_name)
-
-
-class SynapseKeyClientError(Exception):
-    """The key wasn't retrieved from the remote server."""
-    status = None
-    pass
-
-
-class SynapseKeyClientProtocol(HTTPClient):
-    """Low level HTTPS client which retrieves an application/json response from
-    the server and extracts the X.509 certificate for the remote peer from the
-    SSL connection."""
-
-    timeout = 30
-
-    def __init__(self):
-        self.remote_key = defer.Deferred()
-        self.host = None
-        self._peer = None
-
-    def connectionMade(self):
-        self._peer = self.transport.getPeer()
-        logger.debug("Connected to %s", self._peer)
-
-        if not isinstance(self.path, bytes):
-            self.path = self.path.encode('ascii')
-
-        if not isinstance(self.host, bytes):
-            self.host = self.host.encode('ascii')
-
-        self.sendCommand(b"GET", self.path)
-        if self.host:
-            self.sendHeader(b"Host", self.host)
-        self.endHeaders()
-        self.timer = reactor.callLater(
-            self.timeout,
-            self.on_timeout
-        )
-
-    def errback(self, error):
-        if not self.remote_key.called:
-            self.remote_key.errback(error)
-
-    def callback(self, result):
-        if not self.remote_key.called:
-            self.remote_key.callback(result)
-
-    def handleStatus(self, version, status, message):
-        if status != b"200":
-            # logger.info("Non-200 response from %s: %s %s",
-            #            self.transport.getHost(), status, message)
-            error = SynapseKeyClientError(
-                "Non-200 response %r from %r" % (status, self.host)
-            )
-            error.status = status
-            self.errback(error)
-            self.transport.abortConnection()
-
-    def handleResponse(self, response_body_bytes):
-        try:
-            json_response = json.loads(response_body_bytes)
-        except ValueError:
-            # logger.info("Invalid JSON response from %s",
-            #            self.transport.getHost())
-            self.transport.abortConnection()
-            return
-
-        certificate = self.transport.getPeerCertificate()
-        self.callback((json_response, certificate))
-        self.transport.abortConnection()
-        self.timer.cancel()
-
-    def on_timeout(self):
-        logger.debug(
-            "Timeout waiting for response from %s: %s",
-            self.host, self._peer,
-        )
-        self.errback(IOError("Timeout waiting for response"))
-        self.transport.abortConnection()
-
-
-class SynapseKeyClientFactory(Factory):
-    def protocol(self):
-        protocol = SynapseKeyClientProtocol()
-        protocol.path = self.path
-        protocol.host = self.host
-        return protocol
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index 515ebbc148..3a96980bed 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -14,10 +14,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import hashlib
 import logging
 from collections import namedtuple
 
+from six.moves import urllib
+
 from signedjson.key import (
     decode_verify_key_bytes,
     encode_verify_key_base64,
@@ -30,13 +31,11 @@ from signedjson.sign import (
     signature_ids,
     verify_signed_json,
 )
-from unpaddedbase64 import decode_base64, encode_base64
+from unpaddedbase64 import decode_base64
 
-from OpenSSL import crypto
 from twisted.internet import defer
 
 from synapse.api.errors import Codes, SynapseError
-from synapse.crypto.keyclient import fetch_server_key
 from synapse.util import logcontext, unwrapFirstError
 from synapse.util.logcontext import (
     LoggingContext,
@@ -503,31 +502,16 @@ class Keyring(object):
             if requested_key_id in keys:
                 continue
 
-            (response, tls_certificate) = yield fetch_server_key(
-                server_name, self.hs.tls_client_options_factory, requested_key_id
+            response = yield self.client.get_json(
+                destination=server_name,
+                path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id),
+                ignore_backoff=True,
             )
 
             if (u"signatures" not in response
                     or server_name not in response[u"signatures"]):
                 raise KeyLookupError("Key response not signed by remote server")
 
-            if "tls_fingerprints" not in response:
-                raise KeyLookupError("Key response missing TLS fingerprints")
-
-            certificate_bytes = crypto.dump_certificate(
-                crypto.FILETYPE_ASN1, tls_certificate
-            )
-            sha256_fingerprint = hashlib.sha256(certificate_bytes).digest()
-            sha256_fingerprint_b64 = encode_base64(sha256_fingerprint)
-
-            response_sha256_fingerprints = set()
-            for fingerprint in response[u"tls_fingerprints"]:
-                if u"sha256" in fingerprint:
-                    response_sha256_fingerprints.add(fingerprint[u"sha256"])
-
-            if sha256_fingerprint_b64 not in response_sha256_fingerprints:
-                raise KeyLookupError("TLS certificate not allowed by fingerprints")
-
             response_keys = yield self.process_v2_response(
                 from_server=server_name,
                 requested_ids=[requested_key_id],
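
With keyclient.py gone, the keyring fetches keys over the ordinary federation HTTP client and trusts the response on the strength of its signatures alone, rather than cross-checking a TLS fingerprint. A hedged sketch of just the path construction (the helper name is illustrative):

    from six.moves import urllib

    def key_query_path(key_id):
        # e.g. "ed25519:auto" -> "/_matrix/key/v2/server/ed25519%3Aauto"
        return "/_matrix/key/v2/server/" + urllib.parse.quote(key_id)

    assert key_query_path("ed25519:auto") == "/_matrix/key/v2/server/ed25519%3Aauto"
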
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 84c75495d5..38470ad176 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -18,6 +18,7 @@ from distutils.util import strtobool
 
 import six
 
+from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventFormatVersions
 from synapse.util.caches import intern_dict
 from synapse.util.frozenutils import freeze
 
@@ -41,8 +42,13 @@ class _EventInternalMetadata(object):
     def is_outlier(self):
         return getattr(self, "outlier", False)
 
-    def is_invite_from_remote(self):
-        return getattr(self, "invite_from_remote", False)
+    def is_out_of_band_membership(self):
+        """Whether this is an out of band membership, like an invite or an invite
+        rejection. This is needed as those events are marked as outliers, but
+        they still need to be processed as if they're new events (e.g. updating
+        invite state in the database, relaying to clients, etc).
+        """
+        return getattr(self, "out_of_band_membership", False)
 
     def get_send_on_behalf_of(self):
         """Whether this server should send the event on behalf of another server.
@@ -179,6 +185,8 @@ class EventBase(object):
 
 
 class FrozenEvent(EventBase):
+    format_version = EventFormatVersions.V1  # All events of this type are V1
+
     def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
         event_dict = dict(event_dict)
 
@@ -232,3 +240,19 @@ class FrozenEvent(EventBase):
             self.get("type", None),
             self.get("state_key", None),
         )
+
+
+def room_version_to_event_format(room_version):
+    """Converts a room version string to the event format
+
+    Args:
+        room_version (str)
+
+    Returns:
+        int
+    """
+    if room_version not in KNOWN_ROOM_VERSIONS:
+        # We should have already checked version, so this should not happen
+        raise RuntimeError("Unrecognized room version %s" % (room_version,))
+
+    return EventFormatVersions.V1
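
A short usage sketch of the new helper, assuming both known room versions are present in KNOWN_ROOM_VERSIONS at this point; they still share the V1 event format, which is why the body can return a constant:

    from synapse.api.constants import EventFormatVersions, RoomVersions
    from synapse.events import room_version_to_event_format

    assert room_version_to_event_format(RoomVersions.V1) == EventFormatVersions.V1
    assert room_version_to_event_format(RoomVersions.V2) == EventFormatVersions.V1
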
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index b7ad729c63..d749bfdd3a 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -43,8 +43,8 @@ class FederationBase(object):
         self._clock = hs.get_clock()
 
     @defer.inlineCallbacks
-    def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False,
-                                       include_none=False):
+    def _check_sigs_and_hash_and_fetch(self, origin, pdus, room_version,
+                                       outlier=False, include_none=False):
         """Takes a list of PDUs and checks the signatures and hashs of each
         one. If a PDU fails its signature check then we check if we have it in
         the database and if not then request if from the originating server of
@@ -56,8 +56,12 @@ class FederationBase(object):
         a new list.
 
         Args:
+            origin (str)
             pdus (list)
-            outlier (bool)
+            room_version (str)
+            outlier (bool): Whether the events are outliers or not
+            include_none (bool): Whether to include None in the returned list
+                for events that have failed their checks
 
         Returns:
             Deferred : A list of PDUs that have valid signatures and hashes.
@@ -84,6 +88,7 @@ class FederationBase(object):
                     res = yield self.get_pdu(
                         destinations=[pdu.origin],
                         event_id=pdu.event_id,
+                        room_version=room_version,
                         outlier=outlier,
                         timeout=10000,
                     )
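
A sketch of the new calling convention (everything apart from the method itself is an assumption): callers resolve the room version up front and thread it through, so a refetch via get_pdu knows which event format to expect:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def check_pdus(handler, origin, room_id, pdus):
        # handler is any FederationBase subclass with a store attribute.
        room_version = yield handler.store.get_room_version(room_id)
        signed = yield handler._check_sigs_and_hash_and_fetch(
            origin, pdus, room_version=room_version, outlier=True,
        )
        defer.returnValue(signed)
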
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index d05ed91d64..33ecabca29 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -25,14 +25,20 @@ from prometheus_client import Counter
 
 from twisted.internet import defer
 
-from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, Membership
+from synapse.api.constants import (
+    KNOWN_ROOM_VERSIONS,
+    EventTypes,
+    Membership,
+    RoomVersions,
+)
 from synapse.api.errors import (
     CodeMessageException,
     FederationDeniedError,
     HttpResponseException,
     SynapseError,
 )
-from synapse.events import builder
+from synapse.crypto.event_signing import add_hashes_and_signatures
+from synapse.events import room_version_to_event_format
 from synapse.federation.federation_base import FederationBase, event_from_pdu_json
 from synapse.util import logcontext, unwrapFirstError
 from synapse.util.caches.expiringcache import ExpiringCache
@@ -66,6 +72,8 @@ class FederationClient(FederationBase):
         self.state = hs.get_state_handler()
         self.transport_layer = hs.get_federation_transport_client()
 
+        self.event_builder_factory = hs.get_event_builder_factory()
+
         self._get_pdu_cache = ExpiringCache(
             cache_name="get_pdu_cache",
             clock=self._clock,
@@ -202,7 +210,8 @@ class FederationClient(FederationBase):
 
     @defer.inlineCallbacks
     @log_function
-    def get_pdu(self, destinations, event_id, outlier=False, timeout=None):
+    def get_pdu(self, destinations, event_id, room_version, outlier=False,
+                timeout=None):
         """Requests the PDU with given origin and ID from the remote home
         servers.
 
@@ -212,6 +221,7 @@ class FederationClient(FederationBase):
         Args:
             destinations (list): Which home servers to query
             event_id (str): event to fetch
+            room_version (str): version of the room
             outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
                 it's from an arbitrary point in the context as opposed to part
                 of the current block of PDUs. Defaults to `False`
@@ -352,10 +362,13 @@ class FederationClient(FederationBase):
             ev.event_id for ev in itertools.chain(pdus, auth_chain)
         ])
 
+        room_version = yield self.store.get_room_version(room_id)
+
         signed_pdus = yield self._check_sigs_and_hash_and_fetch(
             destination,
             [p for p in pdus if p.event_id not in seen_events],
-            outlier=True
+            outlier=True,
+            room_version=room_version,
         )
         signed_pdus.extend(
             seen_events[p.event_id] for p in pdus if p.event_id in seen_events
@@ -364,7 +377,8 @@ class FederationClient(FederationBase):
         signed_auth = yield self._check_sigs_and_hash_and_fetch(
             destination,
             [p for p in auth_chain if p.event_id not in seen_events],
-            outlier=True
+            outlier=True,
+            room_version=room_version,
         )
         signed_auth.extend(
             seen_events[p.event_id] for p in auth_chain if p.event_id in seen_events
@@ -411,6 +425,8 @@ class FederationClient(FederationBase):
             random.shuffle(srvs)
             return srvs
 
+        room_version = yield self.store.get_room_version(room_id)
+
         batch_size = 20
         missing_events = list(missing_events)
         for i in range(0, len(missing_events), batch_size):
@@ -421,6 +437,7 @@ class FederationClient(FederationBase):
                     self.get_pdu,
                     destinations=random_server_list(),
                     event_id=e_id,
+                    room_version=room_version,
                 )
                 for e_id in batch
             ]
@@ -450,8 +467,11 @@ class FederationClient(FederationBase):
             for p in res["auth_chain"]
         ]
 
+        room_version = yield self.store.get_room_version(room_id)
+
         signed_auth = yield self._check_sigs_and_hash_and_fetch(
-            destination, auth_chain, outlier=True
+            destination, auth_chain,
+            outlier=True, room_version=room_version,
         )
 
         signed_auth.sort(key=lambda e: e.depth)
@@ -522,6 +542,8 @@ class FederationClient(FederationBase):
         Does so by asking one of the already participating servers to create an
         event with proper context.
 
+        Returns a fully signed and hashed event.
+
         Note that this does not append any events to any graphs.
 
         Args:
@@ -536,8 +558,10 @@ class FederationClient(FederationBase):
             params (dict[str, str|Iterable[str]]): Query parameters to include in the
                 request.
         Return:
-            Deferred: resolves to a tuple of (origin (str), event (object))
-            where origin is the remote homeserver which generated the event.
+            Deferred[tuple[str, FrozenEvent, int]]: resolves to a tuple of
+            `(origin, event, event_format)` where origin is the remote
+            homeserver which generated the event, and event_format is one of
+            `synapse.api.constants.EventFormatVersions`.
 
             Fails with a ``SynapseError`` if the chosen remote server
             returns a 300/400 code.
@@ -557,6 +581,11 @@ class FederationClient(FederationBase):
                 destination, room_id, user_id, membership, params,
             )
 
+            # Note: If not supplied, the room version may be either v1 or v2,
+            # however either way the event format version will be v1.
+            room_version = ret.get("room_version", RoomVersions.V1)
+            event_format = room_version_to_event_format(room_version)
+
             pdu_dict = ret.get("event", None)
             if not isinstance(pdu_dict, dict):
                 raise InvalidResponseError("Bad 'event' field in response")
@@ -571,10 +600,21 @@ class FederationClient(FederationBase):
             if "prev_state" not in pdu_dict:
                 pdu_dict["prev_state"] = []
 
-            ev = builder.EventBuilder(pdu_dict)
+            # Strip off the fields that we want to clobber.
+            pdu_dict.pop("origin", None)
+            pdu_dict.pop("origin_server_ts", None)
+            pdu_dict.pop("unsigned", None)
+
+            builder = self.event_builder_factory.new(pdu_dict)
+            add_hashes_and_signatures(
+                builder,
+                self.hs.hostname,
+                self.hs.config.signing_key[0]
+            )
+            ev = builder.build()
 
             defer.returnValue(
-                (destination, ev)
+                (destination, ev, event_format)
             )
 
         return self._try_destination_list(
@@ -650,9 +690,21 @@ class FederationClient(FederationBase):
                 for p in itertools.chain(state, auth_chain)
             }
 
+            room_version = None
+            for e in state:
+                if (e.type, e.state_key) == (EventTypes.Create, ""):
+                    room_version = e.content.get("room_version", RoomVersions.V1)
+                    break
+
+            if room_version is None:
+                # If the state doesn't have a create event then the room is
+                # invalid, and it would fail auth checks anyway.
+                raise SynapseError(400, "No create event in state")
+
             valid_pdus = yield self._check_sigs_and_hash_and_fetch(
                 destination, list(pdus.values()),
                 outlier=True,
+                room_version=room_version,
             )
 
             valid_pdus_map = {
@@ -790,8 +842,10 @@ class FederationClient(FederationBase):
             for e in content["auth_chain"]
         ]
 
+        room_version = yield self.store.get_room_version(room_id)
+
         signed_auth = yield self._check_sigs_and_hash_and_fetch(
-            destination, auth_chain, outlier=True
+            destination, auth_chain, outlier=True, room_version=room_version,
         )
 
         signed_auth.sort(key=lambda e: e.depth)
@@ -838,8 +892,10 @@ class FederationClient(FederationBase):
                 for e in content.get("events", [])
             ]
 
+            room_version = yield self.store.get_room_version(room_id)
+
             signed_events = yield self._check_sigs_and_hash_and_fetch(
-                destination, events, outlier=False
+                destination, events, outlier=False, room_version=room_version,
             )
         except HttpResponseException as e:
             if not e.code == 400:
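
The create-event scan above (in send_join) is the one place the room version must be recovered from raw state rather than the local database. A standalone sketch, assuming RoomVersions.V1 is the pre-versioning default:

    from synapse.api.constants import EventTypes, RoomVersions

    def room_version_from_state(state_events):
        for e in state_events:
            if (e.type, e.state_key) == (EventTypes.Create, ""):
                # Rooms created before versioning default to v1.
                return e.content.get("room_version", RoomVersions.V1)
        # No create event: the room is invalid and would fail auth anyway.
        raise ValueError("No create event in state")
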
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 98722ae543..dde166e295 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -369,13 +369,13 @@ class FederationServer(FederationBase):
         })
 
     @defer.inlineCallbacks
-    def on_invite_request(self, origin, content):
+    def on_invite_request(self, origin, content, room_version):
         pdu = event_from_pdu_json(content)
         origin_host, _ = parse_server_name(origin)
         yield self.check_server_matches_acl(origin_host, pdu.room_id)
         ret_pdu = yield self.handler.on_invite_request(origin, pdu)
         time_now = self._clock.time_msec()
-        defer.returnValue((200, {"event": ret_pdu.get_pdu_json(time_now)}))
+        defer.returnValue({"event": ret_pdu.get_pdu_json(time_now)})
 
     @defer.inlineCallbacks
     def on_send_join_request(self, origin, content):
@@ -400,8 +400,14 @@ class FederationServer(FederationBase):
         origin_host, _ = parse_server_name(origin)
         yield self.check_server_matches_acl(origin_host, room_id)
         pdu = yield self.handler.on_make_leave_request(room_id, user_id)
+
+        room_version = yield self.store.get_room_version(room_id)
+
         time_now = self._clock.time_msec()
-        defer.returnValue({"event": pdu.get_pdu_json(time_now)})
+        defer.returnValue({
+            "event": pdu.get_pdu_json(time_now),
+            "room_version": room_version,
+        })
 
     @defer.inlineCallbacks
     def on_send_leave_request(self, origin, content):
@@ -457,8 +463,10 @@ class FederationServer(FederationBase):
                 for e in content["auth_chain"]
             ]
 
+            room_version = yield self.store.get_room_version(room_id)
+
             signed_auth = yield self._check_sigs_and_hash_and_fetch(
-                origin, auth_chain, outlier=True
+                origin, auth_chain, outlier=True, room_version=room_version,
             )
 
             ret = yield self.handler.on_query_auth(
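
A sketch of the enriched /make_leave response assembled above; the event template is returned alongside the room version so the requester can pick the right event format:

    def make_leave_response(pdu, room_version, time_now):
        return {
            "event": pdu.get_pdu_json(time_now),
            "room_version": room_version,
        }
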
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index edba5a9808..260178c47b 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -21,7 +21,7 @@ from six.moves import urllib
 from twisted.internet import defer
 
 from synapse.api.constants import Membership
-from synapse.api.urls import FEDERATION_PREFIX as PREFIX
+from synapse.api.urls import FEDERATION_V1_PREFIX
 from synapse.util.logutils import log_function
 
 logger = logging.getLogger(__name__)
@@ -51,7 +51,7 @@ class TransportLayerClient(object):
         logger.debug("get_room_state dest=%s, room=%s",
                      destination, room_id)
 
-        path = _create_path(PREFIX, "/state/%s/", room_id)
+        path = _create_v1_path("/state/%s/", room_id)
         return self.client.get_json(
             destination, path=path, args={"event_id": event_id},
         )
@@ -73,7 +73,7 @@ class TransportLayerClient(object):
         logger.debug("get_room_state_ids dest=%s, room=%s",
                      destination, room_id)
 
-        path = _create_path(PREFIX, "/state_ids/%s/", room_id)
+        path = _create_v1_path("/state_ids/%s/", room_id)
         return self.client.get_json(
             destination, path=path, args={"event_id": event_id},
         )
@@ -95,7 +95,7 @@ class TransportLayerClient(object):
         logger.debug("get_pdu dest=%s, event_id=%s",
                      destination, event_id)
 
-        path = _create_path(PREFIX, "/event/%s/", event_id)
+        path = _create_v1_path("/event/%s/", event_id)
         return self.client.get_json(destination, path=path, timeout=timeout)
 
     @log_function
@@ -121,7 +121,7 @@ class TransportLayerClient(object):
             # TODO: raise?
             return
 
-        path = _create_path(PREFIX, "/backfill/%s/", room_id)
+        path = _create_v1_path("/backfill/%s/", room_id)
 
         args = {
             "v": event_tuples,
@@ -167,7 +167,7 @@ class TransportLayerClient(object):
         # generated by the json_data_callback.
         json_data = transaction.get_dict()
 
-        path = _create_path(PREFIX, "/send/%s/", transaction.transaction_id)
+        path = _create_v1_path("/send/%s/", transaction.transaction_id)
 
         response = yield self.client.put_json(
             transaction.destination,
@@ -184,7 +184,7 @@ class TransportLayerClient(object):
     @log_function
     def make_query(self, destination, query_type, args, retry_on_dns_fail,
                    ignore_backoff=False):
-        path = _create_path(PREFIX, "/query/%s", query_type)
+        path = _create_v1_path("/query/%s", query_type)
 
         content = yield self.client.get_json(
             destination=destination,
@@ -231,7 +231,7 @@ class TransportLayerClient(object):
                 "make_membership_event called with membership='%s', must be one of %s" %
                 (membership, ",".join(valid_memberships))
             )
-        path = _create_path(PREFIX, "/make_%s/%s/%s", membership, room_id, user_id)
+        path = _create_v1_path("/make_%s/%s/%s", membership, room_id, user_id)
 
         ignore_backoff = False
         retry_on_dns_fail = False
@@ -258,7 +258,7 @@ class TransportLayerClient(object):
     @defer.inlineCallbacks
     @log_function
     def send_join(self, destination, room_id, event_id, content):
-        path = _create_path(PREFIX, "/send_join/%s/%s", room_id, event_id)
+        path = _create_v1_path("/send_join/%s/%s", room_id, event_id)
 
         response = yield self.client.put_json(
             destination=destination,
@@ -271,7 +271,7 @@ class TransportLayerClient(object):
     @defer.inlineCallbacks
     @log_function
     def send_leave(self, destination, room_id, event_id, content):
-        path = _create_path(PREFIX, "/send_leave/%s/%s", room_id, event_id)
+        path = _create_v1_path("/send_leave/%s/%s", room_id, event_id)
 
         response = yield self.client.put_json(
             destination=destination,
@@ -290,7 +290,7 @@ class TransportLayerClient(object):
     @defer.inlineCallbacks
     @log_function
     def send_invite(self, destination, room_id, event_id, content):
-        path = _create_path(PREFIX, "/invite/%s/%s", room_id, event_id)
+        path = _create_v1_path("/invite/%s/%s", room_id, event_id)
 
         response = yield self.client.put_json(
             destination=destination,
@@ -306,7 +306,7 @@ class TransportLayerClient(object):
     def get_public_rooms(self, remote_server, limit, since_token,
                          search_filter=None, include_all_networks=False,
                          third_party_instance_id=None):
-        path = PREFIX + "/publicRooms"
+        path = _create_v1_path("/publicRooms")
 
         args = {
             "include_all_networks": "true" if include_all_networks else "false",
@@ -332,7 +332,7 @@ class TransportLayerClient(object):
     @defer.inlineCallbacks
     @log_function
     def exchange_third_party_invite(self, destination, room_id, event_dict):
-        path = _create_path(PREFIX, "/exchange_third_party_invite/%s", room_id,)
+        path = _create_v1_path("/exchange_third_party_invite/%s", room_id,)
 
         response = yield self.client.put_json(
             destination=destination,
@@ -345,7 +345,7 @@ class TransportLayerClient(object):
     @defer.inlineCallbacks
     @log_function
     def get_event_auth(self, destination, room_id, event_id):
-        path = _create_path(PREFIX, "/event_auth/%s/%s", room_id, event_id)
+        path = _create_v1_path("/event_auth/%s/%s", room_id, event_id)
 
         content = yield self.client.get_json(
             destination=destination,
@@ -357,7 +357,7 @@ class TransportLayerClient(object):
     @defer.inlineCallbacks
     @log_function
     def send_query_auth(self, destination, room_id, event_id, content):
-        path = _create_path(PREFIX, "/query_auth/%s/%s", room_id, event_id)
+        path = _create_v1_path("/query_auth/%s/%s", room_id, event_id)
 
         content = yield self.client.post_json(
             destination=destination,
@@ -392,7 +392,7 @@ class TransportLayerClient(object):
         Returns:
             A dict containing the device keys.
         """
-        path = PREFIX + "/user/keys/query"
+        path = _create_v1_path("/user/keys/query")
 
         content = yield self.client.post_json(
             destination=destination,
@@ -419,7 +419,7 @@ class TransportLayerClient(object):
         Returns:
             A dict containing the device keys.
         """
-        path = _create_path(PREFIX, "/user/devices/%s", user_id)
+        path = _create_v1_path("/user/devices/%s", user_id)
 
         content = yield self.client.get_json(
             destination=destination,
@@ -455,7 +455,7 @@ class TransportLayerClient(object):
             A dict containing the one-time keys.
         """
 
-        path = PREFIX + "/user/keys/claim"
+        path = _create_v1_path("/user/keys/claim")
 
         content = yield self.client.post_json(
             destination=destination,
@@ -469,7 +469,7 @@ class TransportLayerClient(object):
     @log_function
     def get_missing_events(self, destination, room_id, earliest_events,
                            latest_events, limit, min_depth, timeout):
-        path = _create_path(PREFIX, "/get_missing_events/%s", room_id,)
+        path = _create_v1_path("/get_missing_events/%s", room_id,)
 
         content = yield self.client.post_json(
             destination=destination,
@@ -489,7 +489,7 @@ class TransportLayerClient(object):
     def get_group_profile(self, destination, group_id, requester_user_id):
         """Get a group profile
         """
-        path = _create_path(PREFIX, "/groups/%s/profile", group_id,)
+        path = _create_v1_path("/groups/%s/profile", group_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -508,7 +508,7 @@ class TransportLayerClient(object):
             requester_user_id (str)
             content (dict): The new profile of the group
         """
-        path = _create_path(PREFIX, "/groups/%s/profile", group_id,)
+        path = _create_v1_path("/groups/%s/profile", group_id,)
 
         return self.client.post_json(
             destination=destination,
@@ -522,7 +522,7 @@ class TransportLayerClient(object):
     def get_group_summary(self, destination, group_id, requester_user_id):
         """Get a group summary
         """
-        path = _create_path(PREFIX, "/groups/%s/summary", group_id,)
+        path = _create_v1_path("/groups/%s/summary", group_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -535,7 +535,7 @@ class TransportLayerClient(object):
     def get_rooms_in_group(self, destination, group_id, requester_user_id):
         """Get all rooms in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/rooms", group_id,)
+        path = _create_v1_path("/groups/%s/rooms", group_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -548,7 +548,7 @@ class TransportLayerClient(object):
                           content):
         """Add a room to a group
         """
-        path = _create_path(PREFIX, "/groups/%s/room/%s", group_id, room_id,)
+        path = _create_v1_path("/groups/%s/room/%s", group_id, room_id,)
 
         return self.client.post_json(
             destination=destination,
@@ -562,8 +562,8 @@ class TransportLayerClient(object):
                              config_key, content):
         """Update room in group
         """
-        path = _create_path(
-            PREFIX, "/groups/%s/room/%s/config/%s",
+        path = _create_v1_path(
+            "/groups/%s/room/%s/config/%s",
             group_id, room_id, config_key,
         )
 
@@ -578,7 +578,7 @@ class TransportLayerClient(object):
     def remove_room_from_group(self, destination, group_id, requester_user_id, room_id):
         """Remove a room from a group
         """
-        path = _create_path(PREFIX, "/groups/%s/room/%s", group_id, room_id,)
+        path = _create_v1_path("/groups/%s/room/%s", group_id, room_id,)
 
         return self.client.delete_json(
             destination=destination,
@@ -591,7 +591,7 @@ class TransportLayerClient(object):
     def get_users_in_group(self, destination, group_id, requester_user_id):
         """Get users in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/users", group_id,)
+        path = _create_v1_path("/groups/%s/users", group_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -604,7 +604,7 @@ class TransportLayerClient(object):
     def get_invited_users_in_group(self, destination, group_id, requester_user_id):
         """Get users that have been invited to a group
         """
-        path = _create_path(PREFIX, "/groups/%s/invited_users", group_id,)
+        path = _create_v1_path("/groups/%s/invited_users", group_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -617,8 +617,8 @@ class TransportLayerClient(object):
     def accept_group_invite(self, destination, group_id, user_id, content):
         """Accept a group invite
         """
-        path = _create_path(
-            PREFIX, "/groups/%s/users/%s/accept_invite",
+        path = _create_v1_path(
+            "/groups/%s/users/%s/accept_invite",
             group_id, user_id,
         )
 
@@ -633,7 +633,7 @@ class TransportLayerClient(object):
     def join_group(self, destination, group_id, user_id, content):
         """Attempts to join a group
         """
-        path = _create_path(PREFIX, "/groups/%s/users/%s/join", group_id, user_id)
+        path = _create_v1_path("/groups/%s/users/%s/join", group_id, user_id)
 
         return self.client.post_json(
             destination=destination,
@@ -646,7 +646,7 @@ class TransportLayerClient(object):
     def invite_to_group(self, destination, group_id, user_id, requester_user_id, content):
         """Invite a user to a group
         """
-        path = _create_path(PREFIX, "/groups/%s/users/%s/invite", group_id, user_id)
+        path = _create_v1_path("/groups/%s/users/%s/invite", group_id, user_id)
 
         return self.client.post_json(
             destination=destination,
@@ -662,7 +662,7 @@ class TransportLayerClient(object):
         invited.
         """
 
-        path = _create_path(PREFIX, "/groups/local/%s/users/%s/invite", group_id, user_id)
+        path = _create_v1_path("/groups/local/%s/users/%s/invite", group_id, user_id)
 
         return self.client.post_json(
             destination=destination,
@@ -676,7 +676,7 @@ class TransportLayerClient(object):
                                user_id, content):
         """Remove a user fron a group
         """
-        path = _create_path(PREFIX, "/groups/%s/users/%s/remove", group_id, user_id)
+        path = _create_v1_path("/groups/%s/users/%s/remove", group_id, user_id)
 
         return self.client.post_json(
             destination=destination,
@@ -693,7 +693,7 @@ class TransportLayerClient(object):
         kicked from the group.
         """
 
-        path = _create_path(PREFIX, "/groups/local/%s/users/%s/remove", group_id, user_id)
+        path = _create_v1_path("/groups/local/%s/users/%s/remove", group_id, user_id)
 
         return self.client.post_json(
             destination=destination,
@@ -708,7 +708,7 @@ class TransportLayerClient(object):
         the attestations
         """
 
-        path = _create_path(PREFIX, "/groups/%s/renew_attestation/%s", group_id, user_id)
+        path = _create_v1_path("/groups/%s/renew_attestation/%s", group_id, user_id)
 
         return self.client.post_json(
             destination=destination,
@@ -723,12 +723,12 @@ class TransportLayerClient(object):
         """Update a room entry in a group summary
         """
         if category_id:
-            path = _create_path(
-                PREFIX, "/groups/%s/summary/categories/%s/rooms/%s",
+            path = _create_v1_path(
+                "/groups/%s/summary/categories/%s/rooms/%s",
                 group_id, category_id, room_id,
             )
         else:
-            path = _create_path(PREFIX, "/groups/%s/summary/rooms/%s", group_id, room_id,)
+            path = _create_v1_path("/groups/%s/summary/rooms/%s", group_id, room_id,)
 
         return self.client.post_json(
             destination=destination,
@@ -744,12 +744,12 @@ class TransportLayerClient(object):
         """Delete a room entry in a group summary
         """
         if category_id:
-            path = _create_path(
-                PREFIX + "/groups/%s/summary/categories/%s/rooms/%s",
+            path = _create_v1_path(
+                "/groups/%s/summary/categories/%s/rooms/%s",
                 group_id, category_id, room_id,
             )
         else:
-            path = _create_path(PREFIX, "/groups/%s/summary/rooms/%s", group_id, room_id,)
+            path = _create_v1_path("/groups/%s/summary/rooms/%s", group_id, room_id,)
 
         return self.client.delete_json(
             destination=destination,
@@ -762,7 +762,7 @@ class TransportLayerClient(object):
     def get_group_categories(self, destination, group_id, requester_user_id):
         """Get all categories in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/categories", group_id,)
+        path = _create_v1_path("/groups/%s/categories", group_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -775,7 +775,7 @@ class TransportLayerClient(object):
     def get_group_category(self, destination, group_id, requester_user_id, category_id):
         """Get category info in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/categories/%s", group_id, category_id,)
+        path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -789,7 +789,7 @@ class TransportLayerClient(object):
                               content):
         """Update a category in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/categories/%s", group_id, category_id,)
+        path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id,)
 
         return self.client.post_json(
             destination=destination,
@@ -804,7 +804,7 @@ class TransportLayerClient(object):
                               category_id):
         """Delete a category in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/categories/%s", group_id, category_id,)
+        path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id,)
 
         return self.client.delete_json(
             destination=destination,
@@ -817,7 +817,7 @@ class TransportLayerClient(object):
     def get_group_roles(self, destination, group_id, requester_user_id):
         """Get all roles in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/roles", group_id,)
+        path = _create_v1_path("/groups/%s/roles", group_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -830,7 +830,7 @@ class TransportLayerClient(object):
     def get_group_role(self, destination, group_id, requester_user_id, role_id):
         """Get a roles info
         """
-        path = _create_path(PREFIX, "/groups/%s/roles/%s", group_id, role_id,)
+        path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -844,7 +844,7 @@ class TransportLayerClient(object):
                           content):
         """Update a role in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/roles/%s", group_id, role_id,)
+        path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id,)
 
         return self.client.post_json(
             destination=destination,
@@ -858,7 +858,7 @@ class TransportLayerClient(object):
     def delete_group_role(self, destination, group_id, requester_user_id, role_id):
         """Delete a role in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/roles/%s", group_id, role_id,)
+        path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id,)
 
         return self.client.delete_json(
             destination=destination,
@@ -873,12 +873,12 @@ class TransportLayerClient(object):
         """Update a users entry in a group
         """
         if role_id:
-            path = _create_path(
-                PREFIX, "/groups/%s/summary/roles/%s/users/%s",
+            path = _create_v1_path(
+                "/groups/%s/summary/roles/%s/users/%s",
                 group_id, role_id, user_id,
             )
         else:
-            path = _create_path(PREFIX, "/groups/%s/summary/users/%s", group_id, user_id,)
+            path = _create_v1_path("/groups/%s/summary/users/%s", group_id, user_id,)
 
         return self.client.post_json(
             destination=destination,
@@ -893,7 +893,7 @@ class TransportLayerClient(object):
                               content):
         """Sets the join policy for a group
         """
-        path = _create_path(PREFIX, "/groups/%s/settings/m.join_policy", group_id,)
+        path = _create_v1_path("/groups/%s/settings/m.join_policy", group_id,)
 
         return self.client.put_json(
             destination=destination,
@@ -909,12 +909,12 @@ class TransportLayerClient(object):
         """Delete a users entry in a group
         """
         if role_id:
-            path = _create_path(
-                PREFIX, "/groups/%s/summary/roles/%s/users/%s",
+            path = _create_v1_path(
+                "/groups/%s/summary/roles/%s/users/%s",
                 group_id, role_id, user_id,
             )
         else:
-            path = _create_path(PREFIX, "/groups/%s/summary/users/%s", group_id, user_id,)
+            path = _create_v1_path("/groups/%s/summary/users/%s", group_id, user_id,)
 
         return self.client.delete_json(
             destination=destination,
@@ -927,7 +927,7 @@ class TransportLayerClient(object):
         """Get the groups a list of users are publicising
         """
 
-        path = PREFIX + "/get_groups_publicised"
+        path = _create_v1_path("/get_groups_publicised")
 
         content = {"user_ids": user_ids}
 
@@ -939,20 +939,22 @@ class TransportLayerClient(object):
         )
 
 
-def _create_path(prefix, path, *args):
-    """Creates a path from the prefix, path template and args. Ensures that
-    all args are url encoded.
+def _create_v1_path(path, *args):
+    """Creates a path against V1 federation API from the path template and
+    args. Ensures that all args are url encoded.
 
     Example:
 
-        _create_path(PREFIX, "/event/%s/", event_id)
+        _create_v1_path("/event/%s/", event_id)
 
     Args:
-        prefix (str)
         path (str): String template for the path
         args: ([str]): Args to insert into path. Each arg will be url encoded
 
     Returns:
         str
     """
-    return prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args)
+    return (
+        FEDERATION_V1_PREFIX
+        + path % tuple(urllib.parse.quote(arg, "") for arg in args)
+    )
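
A self-contained sketch of the renamed helper, assuming FEDERATION_V1_PREFIX is "/_matrix/federation/v1" as defined in synapse.api.urls; note that args are quoted with safe="" so "/" and ":" inside Matrix identifiers are escaped too:

    from six.moves import urllib

    FEDERATION_V1_PREFIX = "/_matrix/federation/v1"  # assumed value

    def create_v1_path(path, *args):
        return FEDERATION_V1_PREFIX + path % tuple(
            urllib.parse.quote(arg, "") for arg in args
        )

    assert (create_v1_path("/event/%s/", "$abc:example.com")
            == "/_matrix/federation/v1/event/%24abc%3Aexample.com/")
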
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 3553f418f1..4557a9e66e 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -21,8 +21,9 @@ import re
 from twisted.internet import defer
 
 import synapse
+from synapse.api.constants import RoomVersions
 from synapse.api.errors import Codes, FederationDeniedError, SynapseError
-from synapse.api.urls import FEDERATION_PREFIX as PREFIX
+from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX
 from synapse.http.endpoint import parse_and_validate_server_name
 from synapse.http.server import JsonResource
 from synapse.http.servlet import (
@@ -227,6 +228,8 @@ class BaseFederationServlet(object):
     """
     REQUIRE_AUTH = True
 
+    PREFIX = FEDERATION_V1_PREFIX  # Allows specifying the API version
+
     def __init__(self, handler, authenticator, ratelimiter, server_name):
         self.handler = handler
         self.authenticator = authenticator
@@ -286,7 +289,7 @@ class BaseFederationServlet(object):
         return new_func
 
     def register(self, server):
-        pattern = re.compile("^" + PREFIX + self.PATH + "$")
+        pattern = re.compile("^" + self.PREFIX + self.PATH + "$")
 
         for method in ("GET", "PUT", "POST"):
             code = getattr(self, "on_%s" % (method), None)
@@ -488,14 +491,46 @@ class FederationSendJoinServlet(BaseFederationServlet):
         defer.returnValue((200, content))
 
 
-class FederationInviteServlet(BaseFederationServlet):
+class FederationV1InviteServlet(BaseFederationServlet):
     PATH = "/invite/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
 
     @defer.inlineCallbacks
     def on_PUT(self, origin, content, query, context, event_id):
+        # We don't get a room version, so we have to assume it's EITHER v1 or
+        # v2. This is "fine" as the only difference between V1 and V2 is the
+        # state resolution algorithm, and we don't use that for processing
+        # invites
+        content = yield self.handler.on_invite_request(
+            origin, content, room_version=RoomVersions.V1,
+        )
+
+        # V1 federation API is defined to return a content of `[200, {...}]`
+        # due to a historical bug.
+        defer.returnValue((200, (200, content)))
+
+
+class FederationV2InviteServlet(BaseFederationServlet):
+    PATH = "/invite/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
+
+    PREFIX = FEDERATION_V2_PREFIX
+
+    @defer.inlineCallbacks
+    def on_PUT(self, origin, content, query, context, event_id):
         # TODO(paul): assert that context/event_id parsed from path actually
         #   match those given in content
-        content = yield self.handler.on_invite_request(origin, content)
+
+        room_version = content["room_version"]
+        event = content["event"]
+        invite_room_state = content["invite_room_state"]
+
+        # Synapse expects invite_room_state to be in unsigned, as it is in the
+        # v1 API
+
+        event.setdefault("unsigned", {})["invite_room_state"] = invite_room_state
+
+        content = yield self.handler.on_invite_request(
+            origin, event, room_version=room_version,
+        )
         defer.returnValue((200, content))
 
 
@@ -1263,7 +1298,8 @@ FEDERATION_SERVLET_CLASSES = (
     FederationEventServlet,
     FederationSendJoinServlet,
     FederationSendLeaveServlet,
-    FederationInviteServlet,
+    FederationV1InviteServlet,
+    FederationV2InviteServlet,
     FederationQueryAuthServlet,
     FederationGetMissingEventsServlet,
     FederationEventAuthServlet,
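
A sketch of the response-shape difference the two invite servlets preserve (helper names illustrative): the v1 endpoint keeps its historical double-wrapped body, while v2 returns the content directly:

    def v1_invite_response(content):
        # Historical quirk: the body itself is a [200, {...}] pair.
        return 200, (200, content)

    def v2_invite_response(content):
        return 200, content
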
diff --git a/synapse/handlers/acme.py b/synapse/handlers/acme.py
new file mode 100644
index 0000000000..73ea7ed018
--- /dev/null
+++ b/synapse/handlers/acme.py
@@ -0,0 +1,147 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+import attr
+from zope.interface import implementer
+
+from twisted.internet import defer
+from twisted.internet.endpoints import serverFromString
+from twisted.python.filepath import FilePath
+from twisted.python.url import URL
+from twisted.web import server, static
+from twisted.web.resource import Resource
+
+logger = logging.getLogger(__name__)
+
+try:
+    from txacme.interfaces import ICertificateStore
+
+    @attr.s
+    @implementer(ICertificateStore)
+    class ErsatzStore(object):
+        """
+        A store that only stores in memory.
+        """
+
+        certs = attr.ib(default=attr.Factory(dict))
+
+        def store(self, server_name, pem_objects):
+            self.certs[server_name] = [o.as_bytes() for o in pem_objects]
+            return defer.succeed(None)
+
+
+except ImportError:
+    # txacme is missing
+    pass
+
+
+class AcmeHandler(object):
+    def __init__(self, hs):
+        self.hs = hs
+        self.reactor = hs.get_reactor()
+
+    @defer.inlineCallbacks
+    def start_listening(self):
+
+        # Configure logging for txacme, if you need to debug
+        # from eliot import add_destinations
+        # from eliot.twisted import TwistedDestination
+        #
+        # add_destinations(TwistedDestination())
+
+        from txacme.challenges import HTTP01Responder
+        from txacme.service import AcmeIssuingService
+        from txacme.endpoint import load_or_create_client_key
+        from txacme.client import Client
+        from josepy.jwa import RS256
+
+        self._store = ErsatzStore()
+        responder = HTTP01Responder()
+
+        self._issuer = AcmeIssuingService(
+            cert_store=self._store,
+            client_creator=(
+                lambda: Client.from_url(
+                    reactor=self.reactor,
+                    url=URL.from_text(self.hs.config.acme_url),
+                    key=load_or_create_client_key(
+                        FilePath(self.hs.config.config_dir_path)
+                    ),
+                    alg=RS256,
+                )
+            ),
+            clock=self.reactor,
+            responders=[responder],
+        )
+
+        well_known = Resource()
+        well_known.putChild(b'acme-challenge', responder.resource)
+        responder_resource = Resource()
+        responder_resource.putChild(b'.well-known', well_known)
+        responder_resource.putChild(b'check', static.Data(b'OK', b'text/plain'))
+
+        srv = server.Site(responder_resource)
+
+        listeners = []
+
+        for host in self.hs.config.acme_bind_addresses:
+            logger.info(
+                "Listening for ACME requests on %s:%s", host, self.hs.config.acme_port
+            )
+            endpoint = serverFromString(
+                self.reactor, "tcp:%s:interface=%s" % (self.hs.config.acme_port, host)
+            )
+            listeners.append(endpoint.listen(srv))
+
+        # Make sure we are registered with the ACME server. There's no public API
+        # for this, it is usually triggered by startService, but since we don't
+        # want it to control where we save the certificates, we have to reach in
+        # and trigger the registration machinery ourselves.
+        self._issuer._registered = False
+        yield self._issuer._ensure_registered()
+
+        # Return a Deferred that will fire when all the servers have started up.
+        yield defer.DeferredList(listeners, fireOnOneErrback=True, consumeErrors=True)
+
+    @defer.inlineCallbacks
+    def provision_certificate(self):
+
+        logger.warning("Reprovisioning %s", self.hs.hostname)
+
+        try:
+            yield self._issuer.issue_cert(self.hs.hostname)
+        except Exception:
+            logger.exception("Fail!")
+            raise
+        logger.warning("Reprovisioned %s, saving.", self.hs.hostname)
+        cert_chain = self._store.certs[self.hs.hostname]
+
+        try:
+            with open(self.hs.config.tls_private_key_file, "wb") as private_key_file:
+                for x in cert_chain:
+                    if x.startswith(b"-----BEGIN RSA PRIVATE KEY-----"):
+                        private_key_file.write(x)
+
+            with open(self.hs.config.tls_certificate_file, "wb") as certificate_file:
+                for x in cert_chain:
+                    if x.startswith(b"-----BEGIN CERTIFICATE-----"):
+                        certificate_file.write(x)
+        except Exception:
+            logger.exception("Failed saving!")
+            raise
+
+        defer.returnValue(True)
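
A hedged usage sketch of the new handler; anything beyond the two public methods shown in acme.py is an assumption about how the homeserver wires it up at startup:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def provision(hs):
        acme = AcmeHandler(hs)
        # Start the HTTP-01 responder and register with the ACME server.
        yield acme.start_listening()
        # Issue a certificate for hs.hostname and write the private key and
        # certificate chain to the configured TLS paths.
        yield acme.provision_certificate()
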
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index a3bb864bb2..c52dca1b81 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -34,6 +34,7 @@ from synapse.api.constants import (
     EventTypes,
     Membership,
     RejectedReason,
+    RoomVersions,
 )
 from synapse.api.errors import (
     AuthError,
@@ -43,10 +44,7 @@ from synapse.api.errors import (
     StoreError,
     SynapseError,
 )
-from synapse.crypto.event_signing import (
-    add_hashes_and_signatures,
-    compute_event_signature,
-)
+from synapse.crypto.event_signing import compute_event_signature
 from synapse.events.validator import EventValidator
 from synapse.replication.http.federation import (
     ReplicationCleanRoomRestServlet,
@@ -58,7 +56,6 @@ from synapse.types import UserID, get_domain_from_id
 from synapse.util import logcontext, unwrapFirstError
 from synapse.util.async_helpers import Linearizer
 from synapse.util.distributor import user_joined_room
-from synapse.util.frozenutils import unfreeze
 from synapse.util.logutils import log_function
 from synapse.util.retryutils import NotRetryingDestination
 from synapse.visibility import filter_events_for_server
@@ -342,6 +339,8 @@ class FederationHandler(BaseHandler):
                             room_id, event_id, p,
                         )
 
+                        room_version = yield self.store.get_room_version(room_id)
+
                         with logcontext.nested_logging_context(p):
                             # note that if any of the missing prevs share missing state or
                             # auth events, the requests to fetch those events are deduped
@@ -355,7 +354,7 @@ class FederationHandler(BaseHandler):
                             # we want the state *after* p; get_state_for_room returns the
                             # state *before* p.
                             remote_event = yield self.federation_client.get_pdu(
-                                [origin], p, outlier=True,
+                                [origin], p, room_version, outlier=True,
                             )
 
                             if remote_event is None:
@@ -379,7 +378,6 @@ class FederationHandler(BaseHandler):
                             for x in remote_state:
                                 event_map[x.event_id] = x
 
-                    room_version = yield self.store.get_room_version(room_id)
                     state_map = yield resolve_events_with_store(
                         room_version, state_maps, event_map,
                         state_res_store=StateResolutionStore(self.store),
@@ -655,6 +653,8 @@ class FederationHandler(BaseHandler):
         if dest == self.server_name:
             raise SynapseError(400, "Can't backfill from self.")
 
+        room_version = yield self.store.get_room_version(room_id)
+
         events = yield self.federation_client.backfill(
             dest,
             room_id,
@@ -748,6 +748,7 @@ class FederationHandler(BaseHandler):
                             self.federation_client.get_pdu,
                             [dest],
                             event_id,
+                            room_version=room_version,
                             outlier=True,
                             timeout=10000,
                         )
@@ -1083,7 +1084,6 @@ class FederationHandler(BaseHandler):
         handled_events = set()
 
         try:
-            event = self._sign_event(event)
             # Try the host we successfully got a response to /make_join/
             # request first.
             try:
@@ -1287,7 +1287,7 @@ class FederationHandler(BaseHandler):
             )
 
         event.internal_metadata.outlier = True
-        event.internal_metadata.invite_from_remote = True
+        event.internal_metadata.out_of_band_membership = True
 
         event.signatures.update(
             compute_event_signature(
@@ -1313,7 +1313,7 @@ class FederationHandler(BaseHandler):
         # Mark as outlier as we don't have any state for this event; we're not
         # even in the room.
         event.internal_metadata.outlier = True
-        event = self._sign_event(event)
+        event.internal_metadata.out_of_band_membership = True
 
         # Try the host that we successfully called /make_leave/ on first for
         # the /send_leave/ request.
@@ -1336,7 +1336,7 @@ class FederationHandler(BaseHandler):
     @defer.inlineCallbacks
     def _make_and_verify_event(self, target_hosts, room_id, user_id, membership,
                                content={}, params=None):
-        origin, pdu = yield self.federation_client.make_membership_event(
+        origin, pdu, _ = yield self.federation_client.make_membership_event(
             target_hosts,
             room_id,
             user_id,
@@ -1357,27 +1357,6 @@ class FederationHandler(BaseHandler):
         assert(event.room_id == room_id)
         defer.returnValue((origin, event))
 
-    def _sign_event(self, event):
-        event.internal_metadata.outlier = False
-
-        builder = self.event_builder_factory.new(
-            unfreeze(event.get_pdu_json())
-        )
-
-        builder.event_id = self.event_builder_factory.create_event_id()
-        builder.origin = self.hs.hostname
-
-        if not hasattr(event, "signatures"):
-            builder.signatures = {}
-
-        add_hashes_and_signatures(
-            builder,
-            self.hs.hostname,
-            self.hs.config.signing_key[0],
-        )
-
-        return builder.build()
-
     @defer.inlineCallbacks
     @log_function
     def on_make_leave_request(self, room_id, user_id):
@@ -1659,6 +1638,13 @@ class FederationHandler(BaseHandler):
                 create_event = e
                 break
 
+        if create_event is None:
+            # If the state doesn't have a create event then the room is
+            # invalid, and it would fail auth checks anyway.
+            raise SynapseError(400, "No create event in state")
+
+        room_version = create_event.content.get("room_version", RoomVersions.V1)
+
         missing_auth_events = set()
         for e in itertools.chain(auth_events, state, [event]):
             for e_id in e.auth_event_ids():
@@ -1669,6 +1655,7 @@ class FederationHandler(BaseHandler):
             m_ev = yield self.federation_client.get_pdu(
                 [origin],
                 e_id,
+                room_version=room_version,
                 outlier=True,
                 timeout=10000,
             )
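Several hunks above thread `room_version` through to `get_pdu`, reading it out of the `m.room.create` event's content and defaulting to v1 for rooms that predate the field. A small sketch of that lookup, with a plain dict standing in for the event content ("1" mirrors `RoomVersions.V1`):

    def room_version_from_create(create_event_content):
        # Rooms created before versioning have no room_version key; they are
        # implicitly version 1.
        return create_event_content.get("room_version", "1")

    assert room_version_from_create({}) == "1"
    assert room_version_from_create({"room_version": "3"}) == "3"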
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 581e96c743..cb8c5f77dd 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -269,6 +269,7 @@ class RoomCreationHandler(BaseHandler):
             (EventTypes.RoomHistoryVisibility, ""),
             (EventTypes.GuestAccess, ""),
             (EventTypes.RoomAvatar, ""),
+            (EventTypes.Encryption, ""),
         )
 
         old_room_state_ids = yield self.store.get_filtered_current_state_ids(
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index dc88620885..13e212d669 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -73,8 +73,14 @@ class RoomListHandler(BaseHandler):
             # We explicitly don't bother caching searches or requests for
             # appservice specific lists.
             logger.info("Bypassing cache as search request.")
+
+            # XXX: Quick hack to stop room directory queries taking too long:
+            # time the request out after 60s. We probably want a more
+            # fundamental solution at some point.
+            timeout = self.clock.time() + 60
             return self._get_public_room_list(
-                limit, since_token, search_filter, network_tuple=network_tuple,
+                limit, since_token, search_filter,
+                network_tuple=network_tuple, timeout=timeout,
             )
 
         key = (limit, since_token, network_tuple)
@@ -87,7 +93,8 @@ class RoomListHandler(BaseHandler):
     @defer.inlineCallbacks
     def _get_public_room_list(self, limit=None, since_token=None,
                               search_filter=None,
-                              network_tuple=EMPTY_THIRD_PARTY_ID,):
+                              network_tuple=EMPTY_THIRD_PARTY_ID,
+                              timeout=None,):
         if since_token and since_token != "END":
             since_token = RoomListNextBatch.from_token(since_token)
         else:
@@ -202,6 +209,9 @@ class RoomListHandler(BaseHandler):
 
         chunk = []
         for i in range(0, len(rooms_to_scan), step):
+            if timeout and self.clock.time() > timeout:
+                raise Exception("Timed out searching room directory")
+
             batch = rooms_to_scan[i:i + step]
             logger.info("Processing %i rooms for result", len(batch))
             yield concurrently_execute(
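The timeout is an absolute deadline: computed once before the scan starts and checked at the top of each batch, so a slow batch can overrun but no new batch starts past the deadline. A self-contained sketch of the same pattern; `time.monotonic()` stands in for synapse's `self.clock.time()`:

    import time

    def process_in_batches(items, step, handle_batch, timeout_s=60):
        # Compute the deadline once, up front, as the handler above does.
        deadline = time.monotonic() + timeout_s
        for i in range(0, len(items), step):
            # Check the deadline before starting each batch.
            if time.monotonic() > deadline:
                raise Exception("Timed out processing batches")
            handle_batch(items[i:i + step])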
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 3c40999338..120815b09b 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -19,6 +19,7 @@ from six import iteritems
 
 from twisted.internet import defer
 
+import synapse.metrics
 from synapse.api.constants import EventTypes, JoinRules, Membership
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.roommember import ProfileInfo
@@ -163,6 +164,11 @@ class UserDirectoryHandler(object):
                 yield self._handle_deltas(deltas)
 
                 self.pos = deltas[-1]["stream_id"]
+
+                # Expose current event processing position to prometheus
+                synapse.metrics.event_processing_positions.labels(
+                    "user_dir").set(self.pos)
+
                 yield self.store.update_user_directory_stream_pos(self.pos)
 
     @defer.inlineCallbacks
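`event_processing_positions` is a labelled gauge, so each consumer of the event stream (here "user_dir") reports its own position under the same metric name. A hedged sketch of the equivalent prometheus_client usage; the metric defined here is an illustrative stand-in, not synapse's actual one in `synapse.metrics`:

    from prometheus_client import Gauge

    # Illustrative stand-in for synapse.metrics.event_processing_positions.
    event_processing_positions = Gauge(
        "event_processing_positions", "Current event stream position", ["name"],
    )

    # Same shape as the .labels("user_dir").set(self.pos) call above.
    event_processing_positions.labels("user_dir").set(1234)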
diff --git a/synapse/http/client.py b/synapse/http/client.py
index afcf698b29..47a1f82ff0 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -333,9 +333,10 @@ class SimpleHttpClient(object):
             "POST", uri, headers=Headers(actual_headers), data=query_bytes
         )
 
+        body = yield make_deferred_yieldable(readBody(response))
+
         if 200 <= response.code < 300:
-            body = yield make_deferred_yieldable(treq.json_content(response))
-            defer.returnValue(body)
+            defer.returnValue(json.loads(body))
         else:
             raise HttpResponseException(response.code, response.phrase, body)
 
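The reordering reads the response body before branching on the status code, so a non-2xx response can carry its body into HttpResponseException rather than discarding it. A minimal sketch of that ordering; HttpError here is a stand-in exception class:

    import json

    class HttpError(Exception):  # stand-in for HttpResponseException
        def __init__(self, code, phrase, body):
            super(HttpError, self).__init__("%d %s" % (code, phrase))
            self.body = body

    def parse_json_response(code, phrase, body):
        # By this point the body has already been read, whatever the status,
        # so the error path can include it for debugging.
        if 200 <= code < 300:
            return json.loads(body)
        raise HttpError(code, phrase, body)

    assert parse_json_response(200, "OK", b'{"ok": true}') == {"ok": True}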
diff --git a/synapse/http/endpoint.py b/synapse/http/endpoint.py
index f86a0b624e..cd79ebab62 100644
--- a/synapse/http/endpoint.py
+++ b/synapse/http/endpoint.py
@@ -12,30 +12,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import collections
 import logging
-import random
 import re
-import time
-
-from twisted.internet import defer
-from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
-from twisted.internet.error import ConnectError
-from twisted.names import client, dns
-from twisted.names.error import DNSNameError, DomainError
 
 logger = logging.getLogger(__name__)
 
-SERVER_CACHE = {}
-
-# our record of an individual server which can be tried to reach a destination.
-#
-# "host" is the hostname acquired from the SRV record. Except when there's
-# no SRV record, in which case it is the original hostname.
-_Server = collections.namedtuple(
-    "_Server", "priority weight host port expires"
-)
-
 
 def parse_server_name(server_name):
     """Split a server name into host/port parts.
@@ -100,264 +81,3 @@ def parse_and_validate_server_name(server_name):
         ))
 
     return host, port
-
-
-def matrix_federation_endpoint(reactor, destination, tls_client_options_factory=None,
-                               timeout=None):
-    """Construct an endpoint for the given matrix destination.
-
-    Args:
-        reactor: Twisted reactor.
-        destination (unicode): The name of the server to connect to.
-        tls_client_options_factory
-            (synapse.crypto.context_factory.ClientTLSOptionsFactory):
-            Factory which generates TLS options for client connections.
-        timeout (int): connection timeout in seconds
-    """
-
-    domain, port = parse_server_name(destination)
-
-    endpoint_kw_args = {}
-
-    if timeout is not None:
-        endpoint_kw_args.update(timeout=timeout)
-
-    if tls_client_options_factory is None:
-        transport_endpoint = HostnameEndpoint
-        default_port = 8008
-    else:
-        # the SNI string should be the same as the Host header, minus the port.
-        # as per https://github.com/matrix-org/synapse/issues/2525#issuecomment-336896777,
-        # the Host header and SNI should therefore be the server_name of the remote
-        # server.
-        tls_options = tls_client_options_factory.get_options(domain)
-
-        def transport_endpoint(reactor, host, port, timeout):
-            return wrapClientTLS(
-                tls_options,
-                HostnameEndpoint(reactor, host, port, timeout=timeout),
-            )
-        default_port = 8448
-
-    if port is None:
-        return _WrappingEndpointFac(SRVClientEndpoint(
-            reactor, "matrix", domain, protocol="tcp",
-            default_port=default_port, endpoint=transport_endpoint,
-            endpoint_kw_args=endpoint_kw_args
-        ), reactor)
-    else:
-        return _WrappingEndpointFac(transport_endpoint(
-            reactor, domain, port, **endpoint_kw_args
-        ), reactor)
-
-
-class _WrappingEndpointFac(object):
-    def __init__(self, endpoint_fac, reactor):
-        self.endpoint_fac = endpoint_fac
-        self.reactor = reactor
-
-    @defer.inlineCallbacks
-    def connect(self, protocolFactory):
-        conn = yield self.endpoint_fac.connect(protocolFactory)
-        conn = _WrappedConnection(conn, self.reactor)
-        defer.returnValue(conn)
-
-
-class _WrappedConnection(object):
-    """Wraps a connection and calls abort on it if it hasn't seen any action
-    for 2.5-3 minutes.
-    """
-    __slots__ = ["conn", "last_request"]
-
-    def __init__(self, conn, reactor):
-        object.__setattr__(self, "conn", conn)
-        object.__setattr__(self, "last_request", time.time())
-        self._reactor = reactor
-
-    def __getattr__(self, name):
-        return getattr(self.conn, name)
-
-    def __setattr__(self, name, value):
-        setattr(self.conn, name, value)
-
-    def _time_things_out_maybe(self):
-        # We use a slightly shorter timeout here just in case the callLater is
-        # triggered early. Paranoia ftw.
-        # TODO: Cancel the previous callLater rather than comparing time.time()?
-        if time.time() - self.last_request >= 2.5 * 60:
-            self.abort()
-            # Abort the underlying TLS connection. The abort() method calls
-            # loseConnection() on the TLS connection which tries to
-            # shutdown the connection cleanly. We call abortConnection()
-            # since that will promptly close the TLS connection.
-            #
-            # In Twisted >18.4; the TLS connection will be None if it has closed
-            # which will make abortConnection() throw. Check that the TLS connection
-            # is not None before trying to close it.
-            if self.transport.getHandle() is not None:
-                self.transport.abortConnection()
-
-    def request(self, request):
-        self.last_request = time.time()
-
-        # Time this connection out if we haven't send a request in the last
-        # N minutes
-        # TODO: Cancel the previous callLater?
-        self._reactor.callLater(3 * 60, self._time_things_out_maybe)
-
-        d = self.conn.request(request)
-
-        def update_request_time(res):
-            self.last_request = time.time()
-            # TODO: Cancel the previous callLater?
-            self._reactor.callLater(3 * 60, self._time_things_out_maybe)
-            return res
-
-        d.addCallback(update_request_time)
-
-        return d
-
-
-class SRVClientEndpoint(object):
-    """An endpoint which looks up SRV records for a service.
-    Cycles through the list of servers starting with each call to connect
-    picking the next server.
-    Implements twisted.internet.interfaces.IStreamClientEndpoint.
-    """
-
-    def __init__(self, reactor, service, domain, protocol="tcp",
-                 default_port=None, endpoint=HostnameEndpoint,
-                 endpoint_kw_args={}):
-        self.reactor = reactor
-        self.service_name = "_%s._%s.%s" % (service, protocol, domain)
-
-        if default_port is not None:
-            self.default_server = _Server(
-                host=domain,
-                port=default_port,
-                priority=0,
-                weight=0,
-                expires=0,
-            )
-        else:
-            self.default_server = None
-
-        self.endpoint = endpoint
-        self.endpoint_kw_args = endpoint_kw_args
-
-        self.servers = None
-        self.used_servers = None
-
-    @defer.inlineCallbacks
-    def fetch_servers(self):
-        self.used_servers = []
-        self.servers = yield resolve_service(self.service_name)
-
-    def pick_server(self):
-        if not self.servers:
-            if self.used_servers:
-                self.servers = self.used_servers
-                self.used_servers = []
-                self.servers.sort()
-            elif self.default_server:
-                return self.default_server
-            else:
-                raise ConnectError(
-                    "No server available for %s" % self.service_name
-                )
-
-        # look for all servers with the same priority
-        min_priority = self.servers[0].priority
-        weight_indexes = list(
-            (index, server.weight + 1)
-            for index, server in enumerate(self.servers)
-            if server.priority == min_priority
-        )
-
-        total_weight = sum(weight for index, weight in weight_indexes)
-        target_weight = random.randint(0, total_weight)
-        for index, weight in weight_indexes:
-            target_weight -= weight
-            if target_weight <= 0:
-                server = self.servers[index]
-                # XXX: this looks totally dubious:
-                #
-                # (a) we never reuse a server until we have been through
-                #     all of the servers at the same priority, so if the
-                #     weights are A: 100, B:1, we always do ABABAB instead of
-                #     AAAA...AAAB (approximately).
-                #
-                # (b) After using all the servers at the lowest priority,
-                #     we move onto the next priority. We should only use the
-                #     second priority if servers at the top priority are
-                #     unreachable.
-                #
-                del self.servers[index]
-                self.used_servers.append(server)
-                return server
-
-    @defer.inlineCallbacks
-    def connect(self, protocolFactory):
-        if self.servers is None:
-            yield self.fetch_servers()
-        server = self.pick_server()
-        logger.info("Connecting to %s:%s", server.host, server.port)
-        endpoint = self.endpoint(
-            self.reactor, server.host, server.port, **self.endpoint_kw_args
-        )
-        connection = yield endpoint.connect(protocolFactory)
-        defer.returnValue(connection)
-
-
-@defer.inlineCallbacks
-def resolve_service(service_name, dns_client=client, cache=SERVER_CACHE, clock=time):
-    cache_entry = cache.get(service_name, None)
-    if cache_entry:
-        if all(s.expires > int(clock.time()) for s in cache_entry):
-            servers = list(cache_entry)
-            defer.returnValue(servers)
-
-    servers = []
-
-    try:
-        try:
-            answers, _, _ = yield dns_client.lookupService(service_name)
-        except DNSNameError:
-            defer.returnValue([])
-
-        if (len(answers) == 1
-                and answers[0].type == dns.SRV
-                and answers[0].payload
-                and answers[0].payload.target == dns.Name(b'.')):
-            raise ConnectError("Service %s unavailable" % service_name)
-
-        for answer in answers:
-            if answer.type != dns.SRV or not answer.payload:
-                continue
-
-            payload = answer.payload
-
-            servers.append(_Server(
-                host=str(payload.target),
-                port=int(payload.port),
-                priority=int(payload.priority),
-                weight=int(payload.weight),
-                expires=int(clock.time()) + answer.ttl,
-            ))
-
-        servers.sort()
-        cache[service_name] = list(servers)
-    except DomainError as e:
-        # We failed to resolve the name (other than a NameError)
-        # Try something in the cache, else rereaise
-        cache_entry = cache.get(service_name, None)
-        if cache_entry:
-            logger.warn(
-                "Failed to resolve %r, falling back to cache. %r",
-                service_name, e
-            )
-            servers = list(cache_entry)
-        else:
-            raise e
-
-    defer.returnValue(servers)
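What remains of this module is the server-name parsing. As a hedged illustration of the behaviour described by the docstring above (the port component is optional and comes back as None when absent):

    from synapse.http.endpoint import parse_server_name

    assert parse_server_name("matrix.org") == ("matrix.org", None)
    assert parse_server_name("matrix.org:8448") == ("matrix.org", 8448)
    assert parse_server_name("[::1]") == ("[::1]", None)  # IPv6 literal, no port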
diff --git a/synapse/http/federation/__init__.py b/synapse/http/federation/__init__.py
new file mode 100644
index 0000000000..1453d04571
--- /dev/null
+++ b/synapse/http/federation/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
new file mode 100644
index 0000000000..0ec28c6696
--- /dev/null
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from zope.interface import implementer
+
+from twisted.internet import defer
+from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
+from twisted.web.client import URI, Agent, HTTPConnectionPool
+from twisted.web.iweb import IAgent
+
+from synapse.http.endpoint import parse_server_name
+from synapse.http.federation.srv_resolver import SrvResolver, pick_server_from_list
+from synapse.util.logcontext import make_deferred_yieldable
+
+logger = logging.getLogger(__name__)
+
+
+@implementer(IAgent)
+class MatrixFederationAgent(object):
+    """An Agent-like thing which provides a `request` method which will look up a matrix
+    server and send an HTTP request to it.
+
+    Doesn't implement any retries. (Those are done in MatrixFederationHttpClient.)
+
+    Args:
+        reactor (IReactor): twisted reactor to use for underlying requests
+
+        tls_client_options_factory (ClientTLSOptionsFactory|None):
+            factory to use for fetching client tls options, or none to disable TLS.
+
+        _srv_resolver (SrvResolver|None):
+            SrvResolver implementation to use for looking up SRV records, or
+            None to use the default implementation.
+    """
+
+    def __init__(
+        self, reactor, tls_client_options_factory, _srv_resolver=None,
+    ):
+        self._reactor = reactor
+        self._tls_client_options_factory = tls_client_options_factory
+        if _srv_resolver is None:
+            _srv_resolver = SrvResolver()
+        self._srv_resolver = _srv_resolver
+
+        self._pool = HTTPConnectionPool(reactor)
+        self._pool.retryAutomatically = False
+        self._pool.maxPersistentPerHost = 5
+        self._pool.cachedConnectionTimeout = 2 * 60
+
+    @defer.inlineCallbacks
+    def request(self, method, uri, headers=None, bodyProducer=None):
+        """
+        Args:
+            method (bytes): HTTP method: GET/POST/etc
+
+            uri (bytes): Absolute URI to be retrieved
+
+            headers (twisted.web.http_headers.Headers|None):
+                HTTP headers to send with the request, or None to
+                send no extra headers.
+
+            bodyProducer (twisted.web.iweb.IBodyProducer|None):
+                An object which can generate bytes to make up the
+                body of this request (for example, the properly encoded contents of
+                a file for a file upload).  Or None if the request is to have
+                no body.
+
+        Returns:
+            Deferred[twisted.web.iweb.IResponse]:
+                fires when the header of the response has been received (regardless of the
+                response status code). Fails if there is any problem which prevents that
+                response from being received (including problems that prevent the request
+                from being sent).
+        """
+
+        parsed_uri = URI.fromBytes(uri)
+        server_name_bytes = parsed_uri.netloc
+        host, port = parse_server_name(server_name_bytes.decode("ascii"))
+
+        # XXX disabling TLS is really only supported here for the benefit of the
+        # unit tests. We should make the UTs cope with TLS rather than having to make
+        # the code support the unit tests.
+        if self._tls_client_options_factory is None:
+            tls_options = None
+        else:
+            tls_options = self._tls_client_options_factory.get_options(host)
+
+        if port is not None:
+            target = (host, port)
+        else:
+            service_name = b"_matrix._tcp.%s" % (server_name_bytes, )
+            server_list = yield self._srv_resolver.resolve_service(service_name)
+            if not server_list:
+                target = (host, 8448)
+                logger.debug("No SRV record for %s, using %s", host, target)
+            else:
+                target = pick_server_from_list(server_list)
+
+        class EndpointFactory(object):
+            @staticmethod
+            def endpointForURI(_uri):
+                logger.info("Connecting to %s:%s", target[0], target[1])
+                ep = HostnameEndpoint(self._reactor, host=target[0], port=target[1])
+                if tls_options is not None:
+                    ep = wrapClientTLS(tls_options, ep)
+                return ep
+
+        agent = Agent.usingEndpointFactory(self._reactor, EndpointFactory(), self._pool)
+        res = yield make_deferred_yieldable(
+            agent.request(method, uri, headers, bodyProducer)
+        )
+        defer.returnValue(res)
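A hedged sketch of driving the agent: TLS disabled (which, per the XXX above, is really only for tests) and a stub resolver so no SRV lookup is attempted. The `matrix://` URI shape follows what MatrixFederationHttpClient passes in; running this would still attempt a real TCP connection:

    from twisted.internet import defer

    from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent

    class StubSrvResolver(object):
        def resolve_service(self, service_name):
            # No SRV record: the agent falls back to (host, 8448).
            return defer.succeed([])

    @defer.inlineCallbacks
    def fetch_server_keys(reactor):
        agent = MatrixFederationAgent(reactor, None, _srv_resolver=StubSrvResolver())
        response = yield agent.request(
            b"GET", b"matrix://example.org/_matrix/key/v2/server",
        )
        defer.returnValue(response.code)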
diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py
new file mode 100644
index 0000000000..71830c549d
--- /dev/null
+++ b/synapse/http/federation/srv_resolver.py
@@ -0,0 +1,169 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import random
+import time
+
+import attr
+
+from twisted.internet import defer
+from twisted.internet.error import ConnectError
+from twisted.names import client, dns
+from twisted.names.error import DNSNameError, DomainError
+
+from synapse.util.logcontext import make_deferred_yieldable
+
+logger = logging.getLogger(__name__)
+
+SERVER_CACHE = {}
+
+
+@attr.s
+class Server(object):
+    """
+    Our record of an individual server that can be tried in order to reach a destination.
+
+    Attributes:
+        host (bytes): target hostname
+        port (int):
+        priority (int):
+        weight (int):
+        expires (int): when the cache should expire this record - in *seconds* since
+            the epoch
+    """
+    host = attr.ib()
+    port = attr.ib()
+    priority = attr.ib(default=0)
+    weight = attr.ib(default=0)
+    expires = attr.ib(default=0)
+
+
+def pick_server_from_list(server_list):
+    """Randomly choose a server from the server list
+
+    Args:
+        server_list (list[Server]): list of candidate servers
+
+    Returns:
+        Tuple[bytes, int]: (host, port) pair for the chosen server
+    """
+    if not server_list:
+        raise RuntimeError("pick_server_from_list called with empty list")
+
+    # TODO: currently we only use the lowest-priority servers. We should maintain a
+    # cache of servers known to be "down" and filter them out
+
+    min_priority = min(s.priority for s in server_list)
+    eligible_servers = list(s for s in server_list if s.priority == min_priority)
+    total_weight = sum(s.weight for s in eligible_servers)
+    target_weight = random.randint(0, total_weight)
+
+    for s in eligible_servers:
+        target_weight -= s.weight
+
+        if target_weight <= 0:
+            return s.host, s.port
+
+    # this should be impossible.
+    raise RuntimeError(
+        "pick_server_from_list got to end of eligible server list.",
+    )
+
+
+class SrvResolver(object):
+    """Interface to the dns client to do SRV lookups, with result caching.
+
+    The default resolver in twisted.names doesn't do any caching (it has a CacheResolver,
+    but the cache never gets populated), so we add our own caching layer here.
+
+    Args:
+        dns_client (twisted.internet.interfaces.IResolver): twisted resolver impl
+        cache (dict): cache object
+        get_time (callable): clock implementation. Should return seconds since the epoch
+    """
+    def __init__(self, dns_client=client, cache=SERVER_CACHE, get_time=time.time):
+        self._dns_client = dns_client
+        self._cache = cache
+        self._get_time = get_time
+
+    @defer.inlineCallbacks
+    def resolve_service(self, service_name):
+        """Look up a SRV record
+
+        Args:
+            service_name (bytes): record to look up
+
+        Returns:
+            Deferred[list[Server]]:
+                a list of the SRV records, or an empty list if none found
+        """
+        now = int(self._get_time())
+
+        if not isinstance(service_name, bytes):
+            raise TypeError("%r is not a byte string" % (service_name,))
+
+        cache_entry = self._cache.get(service_name, None)
+        if cache_entry:
+            if all(s.expires > now for s in cache_entry):
+                servers = list(cache_entry)
+                defer.returnValue(servers)
+
+        try:
+            answers, _, _ = yield make_deferred_yieldable(
+                self._dns_client.lookupService(service_name),
+            )
+        except DNSNameError:
+            # TODO: cache this. We can get the SOA out of the exception, and use
+            # the negative-TTL value.
+            defer.returnValue([])
+        except DomainError as e:
+            # We failed to resolve the name (for a reason other than a
+            # DNSNameError): try something in the cache, else re-raise.
+            cache_entry = self._cache.get(service_name, None)
+            if cache_entry:
+                logger.warn(
+                    "Failed to resolve %r, falling back to cache. %r",
+                    service_name, e
+                )
+                defer.returnValue(list(cache_entry))
+            else:
+                raise e
+
+        if (len(answers) == 1
+                and answers[0].type == dns.SRV
+                and answers[0].payload
+                and answers[0].payload.target == dns.Name(b'.')):
+            raise ConnectError("Service %s unavailable" % service_name)
+
+        servers = []
+
+        for answer in answers:
+            if answer.type != dns.SRV or not answer.payload:
+                continue
+
+            payload = answer.payload
+
+            servers.append(Server(
+                host=payload.target.name,
+                port=payload.port,
+                priority=payload.priority,
+                weight=payload.weight,
+                expires=now + answer.ttl,
+            ))
+
+        self._cache[service_name] = list(servers)
+        defer.returnValue(servers)
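A worked example of the selection logic above: only servers at the lowest priority number are eligible, and the draw is weight-biased. Note that because randint(0, total_weight) is inclusive at both ends, weights of 3 vs 1 give a 4-in-5 (not 3-in-4) chance of picking the heavier server:

    from synapse.http.federation.srv_resolver import Server, pick_server_from_list

    servers = [
        Server(host=b"a.example.com", port=8448, priority=0, weight=3),
        Server(host=b"b.example.com", port=8448, priority=0, weight=1),
        Server(host=b"c.example.com", port=8448, priority=10, weight=100),
    ]

    # c.example.com has a higher priority number, so it is never eligible.
    # target_weight is randint(0, 4): values 0-3 pick a, only 4 picks b.
    host, port = pick_server_from_list(servers)
    assert host in (b"a.example.com", b"b.example.com")
    assert port == 8448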
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index ea2fc64b99..980e912348 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -32,7 +32,7 @@ from twisted.internet import defer, protocol
 from twisted.internet.error import DNSLookupError
 from twisted.internet.task import _EPSILON, Cooperator
 from twisted.web._newclient import ResponseDone
-from twisted.web.client import Agent, FileBodyProducer, HTTPConnectionPool
+from twisted.web.client import FileBodyProducer
 from twisted.web.http_headers import Headers
 
 import synapse.metrics
@@ -44,7 +44,7 @@ from synapse.api.errors import (
     RequestSendFailed,
     SynapseError,
 )
-from synapse.http.endpoint import matrix_federation_endpoint
+from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
 from synapse.util.async_helpers import timeout_deferred
 from synapse.util.logcontext import make_deferred_yieldable
 from synapse.util.metrics import Measure
@@ -66,20 +66,6 @@ else:
     MAXINT = sys.maxint
 
 
-class MatrixFederationEndpointFactory(object):
-    def __init__(self, hs):
-        self.reactor = hs.get_reactor()
-        self.tls_client_options_factory = hs.tls_client_options_factory
-
-    def endpointForURI(self, uri):
-        destination = uri.netloc.decode('ascii')
-
-        return matrix_federation_endpoint(
-            self.reactor, destination, timeout=10,
-            tls_client_options_factory=self.tls_client_options_factory
-        )
-
-
 _next_id = 1
 
 
@@ -187,12 +173,10 @@ class MatrixFederationHttpClient(object):
         self.signing_key = hs.config.signing_key[0]
         self.server_name = hs.hostname
         reactor = hs.get_reactor()
-        pool = HTTPConnectionPool(reactor)
-        pool.retryAutomatically = False
-        pool.maxPersistentPerHost = 5
-        pool.cachedConnectionTimeout = 2 * 60
-        self.agent = Agent.usingEndpointFactory(
-            reactor, MatrixFederationEndpointFactory(hs), pool=pool
+
+        self.agent = MatrixFederationAgent(
+            hs.get_reactor(),
+            hs.tls_client_options_factory,
         )
         self.clock = hs.get_clock()
         self._store = hs.get_datastore()
@@ -316,34 +300,33 @@ class MatrixFederationHttpClient(object):
                     headers_dict[b"Authorization"] = auth_headers
 
                     logger.info(
-                        "{%s} [%s] Sending request: %s %s",
+                        "{%s} [%s] Sending request: %s %s; timeout %fs",
                         request.txn_id, request.destination, request.method,
-                        url_str,
-                    )
-
-                    # we don't want all the fancy cookie and redirect handling that
-                    # treq.request gives: just use the raw Agent.
-                    request_deferred = self.agent.request(
-                        method_bytes,
-                        url_bytes,
-                        headers=Headers(headers_dict),
-                        bodyProducer=producer,
-                    )
-
-                    request_deferred = timeout_deferred(
-                        request_deferred,
-                        timeout=_sec_timeout,
-                        reactor=self.hs.get_reactor(),
+                        url_str, _sec_timeout,
                     )
 
                     try:
                         with Measure(self.clock, "outbound_request"):
-                            response = yield make_deferred_yieldable(
+                            # we don't want all the fancy cookie and redirect handling
+                            # that treq.request gives: just use the raw Agent.
+                            request_deferred = self.agent.request(
+                                method_bytes,
+                                url_bytes,
+                                headers=Headers(headers_dict),
+                                bodyProducer=producer,
+                            )
+
+                            request_deferred = timeout_deferred(
                                 request_deferred,
+                                timeout=_sec_timeout,
+                                reactor=self.hs.get_reactor(),
                             )
+
+                            response = yield request_deferred
                     except DNSLookupError as e:
                         raise_from(RequestSendFailed(e, can_retry=retry_on_dns_fail), e)
                     except Exception as e:
+                        logger.info("Failed to send request: %s", e)
                         raise_from(RequestSendFailed(e, can_retry=True), e)
 
                     logger.info(
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index 2fe790f95d..756721e304 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -40,7 +40,11 @@ REQUIREMENTS = [
     "signedjson>=1.0.0",
     "pynacl>=1.2.1",
     "service_identity>=16.0.0",
-    "Twisted>=17.1.0",
+
+    # our logcontext handling relies on the ability to cancel inlineCallbacks
+    # (https://twistedmatrix.com/trac/ticket/4632) which landed in Twisted 18.7.
+    "Twisted>=18.7.0",
+
     "treq>=15.1",
     # Twisted has required pyopenssl 16.0 since about Twisted 16.6.
     "pyopenssl>=16.0.0",
@@ -59,8 +63,11 @@ REQUIREMENTS = [
     # prometheus_client 0.4.0 changed the format of counter metrics
     # (cf https://github.com/matrix-org/synapse/issues/4001)
     "prometheus_client>=0.0.18,<0.4.0",
+
     # we use attr.s(slots), which arrived in 16.0.0
-    "attrs>=16.0.0",
+    # Twisted 18.7.0 requires attrs>=17.4.0
+    "attrs>=17.4.0",
+
     "netaddr>=0.7.18",
 ]
 
@@ -72,6 +79,10 @@ CONDITIONAL_REQUIREMENTS = {
     # ConsentResource uses select_autoescape, which arrived in jinja 2.9
     "resources.consent": ["Jinja2>=2.9"],
 
+    # ACME support is required to provision TLS certificates from authorities
+    # that use the protocol, such as Let's Encrypt.
+    "acme": ["txacme>=0.9.2"],
+
     "saml2": ["pysaml2>=4.5.0"],
     "url_preview": ["lxml>=3.5.0"],
     "test": ["mock>=2.0"],
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index aec0c6b075..7f812b8209 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -309,22 +309,16 @@ class RegisterRestServlet(RestServlet):
                 assigned_user_id=registered_user_id,
             )
 
-        # Only give msisdn flows if the x_show_msisdn flag is given:
-        # this is a hack to work around the fact that clients were shipped
-        # that use fallback registration if they see any flows that they don't
-        # recognise, which means we break registration for these clients if we
-        # advertise msisdn flows. Once usage of Riot iOS <=0.3.9 and Riot
-        # Android <=0.6.9 have fallen below an acceptable threshold, this
-        # parameter should go away and we should always advertise msisdn flows.
-        show_msisdn = False
-        if 'x_show_msisdn' in body and body['x_show_msisdn']:
-            show_msisdn = True
-
         # FIXME: need a better error than "no auth flow found" for scenarios
         # where we required 3PID for registration but the user didn't give one
         require_email = 'email' in self.hs.config.registrations_require_3pid
         require_msisdn = 'msisdn' in self.hs.config.registrations_require_3pid
 
+        show_msisdn = True
+        if self.hs.config.disable_msisdn_registration:
+            show_msisdn = False
+            require_msisdn = False
+
         flows = []
         if self.hs.config.enable_registration_captcha:
             # only support 3PIDless registration if no 3PIDs are required
@@ -422,8 +416,11 @@ class RegisterRestServlet(RestServlet):
             )
             # Necessary due to auth checks prior to the threepid being
             # written to the db
-            if is_threepid_reserved(self.hs.config, threepid):
-                yield self.store.upsert_monthly_active_user(registered_user_id)
+            if threepid:
+                if is_threepid_reserved(
+                    self.hs.config.mau_limits_reserved_threepids, threepid
+                ):
+                    yield self.store.upsert_monthly_active_user(registered_user_id)
 
             # remember that we've now registered that user account, and with
             #  what user ID (since the user may not have specified)
diff --git a/synapse/server.py b/synapse/server.py
index 9985687b95..c8914302cf 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -46,6 +46,7 @@ from synapse.federation.transport.client import TransportLayerClient
 from synapse.groups.attestations import GroupAttestationSigning, GroupAttestionRenewer
 from synapse.groups.groups_server import GroupsServerHandler
 from synapse.handlers import Handlers
+from synapse.handlers.acme import AcmeHandler
 from synapse.handlers.appservice import ApplicationServicesHandler
 from synapse.handlers.auth import AuthHandler, MacaroonGenerator
 from synapse.handlers.deactivate_account import DeactivateAccountHandler
@@ -129,6 +130,7 @@ class HomeServer(object):
         'sync_handler',
         'typing_handler',
         'room_list_handler',
+        'acme_handler',
         'auth_handler',
         'device_handler',
         'e2e_keys_handler',
@@ -310,6 +312,9 @@ class HomeServer(object):
     def build_e2e_room_keys_handler(self):
         return E2eRoomKeysHandler(self)
 
+    def build_acme_handler(self):
+        return AcmeHandler(self)
+
     def build_application_service_api(self):
         return ApplicationServiceApi(self)
 
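Registering 'acme_handler' in the list above plus a build_acme_handler method is all it takes for hs.get_acme_handler() to exist: HomeServer synthesises a cached get_<name> accessor for every registered dependency. A stripped-down sketch of that convention (an illustration of the pattern, not synapse's actual machinery):

    class MiniHomeServer(object):
        DEPENDENCIES = ["acme_handler"]

        def __init__(self):
            self._built = {}

        def build_acme_handler(self):
            return object()  # stand-in for AcmeHandler(self)

        def __getattr__(self, name):
            # Synthesise get_<dep>() accessors which build once and cache.
            if name.startswith("get_") and name[4:] in self.DEPENDENCIES:
                dep = name[4:]

                def getter():
                    if dep not in self._built:
                        self._built[dep] = getattr(self, "build_" + dep)()
                    return self._built[dep]

                return getter
            raise AttributeError(name)

    hs = MiniHomeServer()
    assert hs.get_acme_handler() is hs.get_acme_handler()  # built once, cached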
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 865b5e915a..f62f70b9f1 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -26,6 +26,7 @@ from prometheus_client import Histogram
 from twisted.internet import defer
 
 from synapse.api.errors import StoreError
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.engines import PostgresEngine
 from synapse.util.caches.descriptors import Cache
 from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
@@ -192,6 +193,51 @@ class SQLBaseStore(object):
 
         self.database_engine = hs.database_engine
 
+        # A set of tables that are not safe to use native upserts in.
+        self._unsafe_to_upsert_tables = {"user_ips"}
+
+        if self.database_engine.can_native_upsert:
+            # Check ASAP (and then later, every 15s) to see if we have finished
+            # background updates of tables that aren't safe to update.
+            self._clock.call_later(
+                0.0,
+                run_as_background_process,
+                "upsert_safety_check",
+                self._check_safe_to_upsert
+            )
+
+    @defer.inlineCallbacks
+    def _check_safe_to_upsert(self):
+        """
+        Is it safe to use native UPSERT?
+
+        If there are background updates, we will need to wait, as they may be
+        adding the UNIQUE indexes that native upserts require.
+
+        If the background updates have not completed, wait 15 sec and check again.
+        """
+        updates = yield self._simple_select_list(
+            "background_updates",
+            keyvalues=None,
+            retcols=["update_name"],
+            desc="check_background_updates",
+        )
+        updates = [x["update_name"] for x in updates]
+
+        # The user_ips table in schema #53 was missing a unique index; adding
+        # one is run as a background update.
+        if "user_ips_device_unique_index" not in updates:
+            self._unsafe_to_upsert_tables.discard("user_ips")
+
+        # If there are any tables left to check, reschedule to run again.
+        if self._unsafe_to_upsert_tables:
+            self._clock.call_later(
+                15.0,
+                run_as_background_process,
+                "upsert_safety_check",
+                self._check_safe_to_upsert
+            )
+
     def start_profiling(self):
         self._previous_loop_ts = self._clock.time_msec()
 
@@ -494,8 +540,15 @@ class SQLBaseStore(object):
         txn.executemany(sql, vals)
 
     @defer.inlineCallbacks
-    def _simple_upsert(self, table, keyvalues, values,
-                       insertion_values={}, desc="_simple_upsert", lock=True):
+    def _simple_upsert(
+        self,
+        table,
+        keyvalues,
+        values,
+        insertion_values={},
+        desc="_simple_upsert",
+        lock=True
+    ):
         """
 
         `lock` should generally be set to True (the default), but can be set
@@ -516,16 +569,21 @@ class SQLBaseStore(object):
                 inserting
             lock (bool): True to lock the table when doing the upsert.
         Returns:
-            Deferred(bool): True if a new entry was created, False if an
-                existing one was updated.
+            Deferred(None or bool): Native upserts always return None. Emulated
+            upserts return True if a new entry was created, False if an existing
+            one was updated.
         """
         attempts = 0
         while True:
             try:
                 result = yield self.runInteraction(
                     desc,
-                    self._simple_upsert_txn, table, keyvalues, values, insertion_values,
-                    lock=lock
+                    self._simple_upsert_txn,
+                    table,
+                    keyvalues,
+                    values,
+                    insertion_values,
+                    lock=lock,
                 )
                 defer.returnValue(result)
             except self.database_engine.module.IntegrityError as e:
@@ -537,12 +595,71 @@ class SQLBaseStore(object):
 
                 # presumably we raced with another transaction: let's retry.
                 logger.warn(
-                    "IntegrityError when upserting into %s; retrying: %s",
-                    table, e
+                    "%s when upserting into %s; retrying: %s", e.__class__.__name__, table, e
                 )
 
-    def _simple_upsert_txn(self, txn, table, keyvalues, values, insertion_values={},
-                           lock=True):
+    def _simple_upsert_txn(
+        self,
+        txn,
+        table,
+        keyvalues,
+        values,
+        insertion_values={},
+        lock=True,
+    ):
+        """
+        Pick the UPSERT method which works best on the platform: either the
+        native one (PostgreSQL 9.5+, SQLite 3.24+), or an emulated fallback.
+
+        Args:
+            txn: The transaction to use.
+            table (str): The table to upsert into
+            keyvalues (dict): The unique key columns and their new values
+            values (dict): The nonunique columns and their new values
+            insertion_values (dict): additional key/values to use only when
+                inserting
+            lock (bool): True to lock the table when doing the upsert.
+        Returns:
+            None or bool: Native upserts always return None. Emulated
+            upserts return True if a new entry was created, False if an existing
+            one was updated.
+        """
+        if (
+            self.database_engine.can_native_upsert
+            and table not in self._unsafe_to_upsert_tables
+        ):
+            return self._simple_upsert_txn_native_upsert(
+                txn,
+                table,
+                keyvalues,
+                values,
+                insertion_values=insertion_values,
+            )
+        else:
+            return self._simple_upsert_txn_emulated(
+                txn,
+                table,
+                keyvalues,
+                values,
+                insertion_values=insertion_values,
+                lock=lock,
+            )
+
+    def _simple_upsert_txn_emulated(
+        self, txn, table, keyvalues, values, insertion_values={}, lock=True
+    ):
+        """
+        Args:
+            table (str): The table to upsert into
+            keyvalues (dict): The unique key columns and their new values
+            values (dict): The nonunique columns and their new values
+            insertion_values (dict): additional key/values to use only when
+                inserting
+            lock (bool): True to lock the table when doing the upsert.
+        Returns:
+            bool: True if a new entry was created, False if an existing
+            one was updated.
+        """
         # We need to lock the table :(, unless we're *really* careful
         if lock:
             self.database_engine.lock_table(txn, table)
@@ -577,12 +694,44 @@ class SQLBaseStore(object):
         sql = "INSERT INTO %s (%s) VALUES (%s)" % (
             table,
             ", ".join(k for k in allvalues),
-            ", ".join("?" for _ in allvalues)
+            ", ".join("?" for _ in allvalues),
         )
         txn.execute(sql, list(allvalues.values()))
         # successfully inserted
         return True
 
+    def _simple_upsert_txn_native_upsert(
+        self, txn, table, keyvalues, values, insertion_values={}
+    ):
+        """
+        Use the native UPSERT functionality in recent PostgreSQL and SQLite versions.
+
+        Args:
+            table (str): The table to upsert into
+            keyvalues (dict): The unique key columns and their new values
+            values (dict): The nonunique columns and their new values
+            insertion_values (dict): additional key/values to use only when
+                inserting
+        Returns:
+            None
+        """
+        allvalues = {}
+        allvalues.update(keyvalues)
+        allvalues.update(values)
+        allvalues.update(insertion_values)
+
+        sql = (
+            "INSERT INTO %s (%s) VALUES (%s) "
+            "ON CONFLICT (%s) DO UPDATE SET %s"
+        ) % (
+            table,
+            ", ".join(k for k in allvalues),
+            ", ".join("?" for _ in allvalues),
+            ", ".join(k for k in keyvalues),
+            ", ".join(k + "=EXCLUDED." + k for k in values),
+        )
+        txn.execute(sql, list(allvalues.values()))
+
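As a worked example of the statement the native path builds: for table "user_ips", key columns (user_id, access_token, ip) and non-key column last_seen, the format string above yields the SQL shown below. The sketch reuses the same construction, with insertion_values omitted for brevity and dict ordering assumed as on Python 3.7+:

    def build_native_upsert_sql(table, keyvalues, values):
        # Same construction as _simple_upsert_txn_native_upsert above.
        allvalues = dict(keyvalues, **values)
        return (
            "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) DO UPDATE SET %s"
        ) % (
            table,
            ", ".join(allvalues),
            ", ".join("?" for _ in allvalues),
            ", ".join(keyvalues),
            ", ".join(k + "=EXCLUDED." + k for k in values),
        )

    sql = build_native_upsert_sql(
        "user_ips",
        {"user_id": None, "access_token": None, "ip": None},
        {"last_seen": None},
    )
    # -> INSERT INTO user_ips (user_id, access_token, ip, last_seen)
    #    VALUES (?, ?, ?, ?) ON CONFLICT (user_id, access_token, ip)
    #    DO UPDATE SET last_seen=EXCLUDED.last_seen   (wrapped for readability)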
     def _simple_select_one(self, table, keyvalues, retcols,
                            allow_none=False, desc="_simple_select_one"):
         """Executes a SELECT query on the named table, which is expected to
diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py
index 5d548f250a..091d7116c5 100644
--- a/synapse/storage/client_ips.py
+++ b/synapse/storage/client_ips.py
@@ -110,8 +110,13 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
 
     @defer.inlineCallbacks
     def _remove_user_ip_dupes(self, progress, batch_size):
+        # This function works by scanning the user_ips table in batches
+        # based on `last_seen`. For each row in a batch it searches the rest of
+        # the table for duplicates; if any are found, they are removed and
+        # replaced with a single suitable row.
 
-        last_seen_progress = progress.get("last_seen", 0)
+        # Fetch the start of the batch
+        begin_last_seen = progress.get("last_seen", 0)
 
         def get_last_seen(txn):
             txn.execute(
@@ -122,29 +127,28 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
                 LIMIT 1
                 OFFSET ?
                 """,
-                (last_seen_progress, batch_size)
+                (begin_last_seen, batch_size)
             )
-            results = txn.fetchone()
-            return results
-
-        # Get a last seen that's sufficiently far away enough from the last one
-        last_seen = yield self.runInteraction(
+            row = txn.fetchone()
+            if row:
+                return row[0]
+            else:
+                return None
+
+        # Get a last_seen that is roughly `batch_size` rows on from `begin_last_seen`
+        end_last_seen = yield self.runInteraction(
             "user_ips_dups_get_last_seen", get_last_seen
         )
 
-        if not last_seen:
-            # If we get a None then we're reaching the end and just need to
-            # delete the last batch.
-            last = True
+        # If it returns None, then we're processing the last batch
+        last = end_last_seen is None
 
-            # We fake not having an upper bound by using a future date, by
-            # just multiplying the current time by two....
-            last_seen = int(self.clock.time_msec()) * 2
-        else:
-            last = False
-            last_seen = last_seen[0]
+        logger.info(
+            "Scanning for duplicate 'user_ips' rows in range: %s <= last_seen < %s",
+            begin_last_seen, end_last_seen,
+        )
 
-        def remove(txn, last_seen_progress, last_seen):
+        def remove(txn):
             # This works by looking at all entries in the given time span, and
             # then for each (user_id, access_token, ip) tuple in that range
             # checking for any duplicates in the rest of the table (via a join).
@@ -153,6 +157,16 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
             # all other duplicates.
             # It is efficient due to the existence of (user_id, access_token,
             # ip) and (last_seen) indices.
+
+            # Define the search space, which requires handling the last batch in
+            # a different way
+            if last:
+                clause = "? <= last_seen"
+                args = (begin_last_seen,)
+            else:
+                clause = "? <= last_seen AND last_seen < ?"
+                args = (begin_last_seen, end_last_seen)
+
             txn.execute(
                 """
                 SELECT user_id, access_token, ip,
@@ -160,13 +174,13 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
                 FROM (
                     SELECT user_id, access_token, ip
                     FROM user_ips
-                    WHERE ? <= last_seen AND last_seen < ?
-                    ORDER BY last_seen
+                    WHERE {}
                 ) c
                 INNER JOIN user_ips USING (user_id, access_token, ip)
                 GROUP BY user_id, access_token, ip
-                HAVING count(*) > 1""",
-                (last_seen_progress, last_seen)
+                HAVING count(*) > 1
+                """.format(clause),
+                args
             )
             res = txn.fetchall()
 
@@ -194,12 +208,11 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
                 )
 
             self._background_update_progress_txn(
-                txn, "user_ips_remove_dupes", {"last_seen": last_seen}
+                txn, "user_ips_remove_dupes", {"last_seen": end_last_seen}
             )
 
-        yield self.runInteraction(
-            "user_ips_dups_remove", remove, last_seen_progress, last_seen
-        )
+        yield self.runInteraction("user_ips_dups_remove", remove)
+
         if last:
             yield self._end_background_update("user_ips_remove_dupes")
 
@@ -244,7 +257,10 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
         )
 
     def _update_client_ips_batch_txn(self, txn, to_update):
-        self.database_engine.lock_table(txn, "user_ips")
+        if "user_ips" in self._unsafe_to_upsert_tables or (
+            not self.database_engine.can_native_upsert
+        ):
+            self.database_engine.lock_table(txn, "user_ips")
 
         for entry in iteritems(to_update):
             (user_id, access_token, ip), (user_agent, device_id, last_seen) = entry
diff --git a/synapse/storage/engines/__init__.py b/synapse/storage/engines/__init__.py
index e2f9de8451..ff5ef97ca8 100644
--- a/synapse/storage/engines/__init__.py
+++ b/synapse/storage/engines/__init__.py
@@ -18,7 +18,7 @@ import platform
 
 from ._base import IncorrectDatabaseSetup
 from .postgres import PostgresEngine
-from .sqlite3 import Sqlite3Engine
+from .sqlite import Sqlite3Engine
 
 SUPPORTED_MODULE = {
     "sqlite3": Sqlite3Engine,
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index 42225f8a2a..4004427c7b 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -38,6 +38,13 @@ class PostgresEngine(object):
         return sql.replace("?", "%s")
 
     def on_new_connection(self, db_conn):
+
+        # Get the version of PostgreSQL that we're using. As per the psycopg2
+        # docs: The number is formed by converting the major, minor, and
+        # revision numbers into two-decimal-digit numbers and appending them
+        # together. For example, version 8.1.5 will be returned as 80105
+        self._version = db_conn.server_version
+
         db_conn.set_isolation_level(
             self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ
         )
@@ -54,6 +61,13 @@ class PostgresEngine(object):
 
         cursor.close()
 
+    @property
+    def can_native_upsert(self):
+        """
+        Can we use native UPSERTs? This requires PostgreSQL 9.5+.
+        """
+        return self._version >= 90500
+
     def is_deadlock(self, error):
         if isinstance(error, self.module.DatabaseError):
             # https://www.postgresql.org/docs/current/static/errcodes-appendix.html
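
As the comment above notes, psycopg2 exposes the server version as a single integer with two decimal digits per component, which is why `can_native_upsert` compares against 90500 (PostgreSQL 9.5, the first release with `INSERT ... ON CONFLICT`). A quick sketch of the encoding for pre-10 servers, using a hypothetical helper:

    def encode_pg_version(major, minor, revision):
        # hypothetical helper: (8, 1, 5) -> 80105, as in the docs quoted above
        return major * 10000 + minor * 100 + revision

    assert encode_pg_version(8, 1, 5) == 80105
    assert encode_pg_version(9, 5, 0) >= 90500   # native upserts available
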
diff --git a/synapse/storage/engines/sqlite3.py b/synapse/storage/engines/sqlite.py
index 19949fc474..c64d73ff21 100644
--- a/synapse/storage/engines/sqlite3.py
+++ b/synapse/storage/engines/sqlite.py
@@ -15,6 +15,7 @@
 
 import struct
 import threading
+from sqlite3 import sqlite_version_info
 
 from synapse.storage.prepare_database import prepare_database
 
@@ -30,6 +31,14 @@ class Sqlite3Engine(object):
         self._current_state_group_id = None
         self._current_state_group_id_lock = threading.Lock()
 
+    @property
+    def can_native_upsert(self):
+        """
+        Do we support native UPSERTs? This requires SQLite 3.24+, plus some
+        more work we haven't done yet to tell what was inserted vs updated.
+        """
+        return sqlite_version_info >= (3, 24, 0)
+
     def check_database(self, txn):
         pass
 
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 2047110b1d..3e1915fb87 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -739,7 +739,18 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore
         }
 
         events_map = {ev.event_id: ev for ev, _ in events_context}
-        room_version = yield self.get_room_version(room_id)
+
+        # We need to get the room version, which is in the create event.
+        # Normally that'd be in the database, but it's also possible that we're
+        # currently trying to persist it.
+        room_version = None
+        for ev, _ in events_context:
+            if ev.type == EventTypes.Create and ev.state_key == "":
+                room_version = ev.content.get("room_version", "1")
+                break
+
+        if not room_version:
+            room_version = yield self.get_room_version(room_id)
 
         logger.debug("calling resolve_state_groups from preserve_events")
         res = yield self._state_resolution_handler.resolve_state_groups(
@@ -1257,6 +1268,7 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore
                         event.internal_metadata.get_dict()
                     ),
                     "json": encode_json(event_dict(event)),
+                    "format_version": event.format_version,
                 }
                 for event, _ in events_and_contexts
             ],
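
The room-version lookup above prefers the create event in the batch being persisted, since it may not have reached the database yet, and only falls back to a database read otherwise. Roughly, as a standalone sketch (assuming `EventTypes` from `synapse.api.constants`):

    def room_version_from_batch(events_context):
        # hypothetical helper mirroring the loop above: the create event is
        # the state event of type m.room.create with an empty state_key, and
        # an absent room_version in its content means version "1"
        for ev, _ctx in events_context:
            if ev.type == EventTypes.Create and ev.state_key == "":
                return ev.content.get("room_version", "1")
        return None   # caller falls back to get_room_version(room_id)
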
diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py
index a8326f5296..599f892858 100644
--- a/synapse/storage/events_worker.py
+++ b/synapse/storage/events_worker.py
@@ -21,10 +21,10 @@ from canonicaljson import json
 
 from twisted.internet import defer
 
+from synapse.api.constants import EventFormatVersions
 from synapse.api.errors import NotFoundError
-# these are only included to make the type annotations work
-from synapse.events import EventBase  # noqa: F401
 from synapse.events import FrozenEvent
+# these are only included to make the type annotations work
 from synapse.events.snapshot import EventContext  # noqa: F401
 from synapse.events.utils import prune_event
 from synapse.metrics.background_process_metrics import run_as_background_process
@@ -353,6 +353,7 @@ class EventsWorkerStore(SQLBaseStore):
                     self._get_event_from_row,
                     row["internal_metadata"], row["json"], row["redacts"],
                     rejected_reason=row["rejects"],
+                    format_version=row["format_version"],
                 )
                 for row in rows
             ],
@@ -377,6 +378,7 @@ class EventsWorkerStore(SQLBaseStore):
                 " e.event_id as event_id, "
                 " e.internal_metadata,"
                 " e.json,"
+                " e.format_version, "
                 " r.redacts as redacts,"
                 " rej.event_id as rejects "
                 " FROM event_json as e"
@@ -392,7 +394,7 @@ class EventsWorkerStore(SQLBaseStore):
 
     @defer.inlineCallbacks
     def _get_event_from_row(self, internal_metadata, js, redacted,
-                            rejected_reason=None):
+                            format_version, rejected_reason=None):
         with Measure(self._clock, "_get_event_from_row"):
             d = json.loads(js)
             internal_metadata = json.loads(internal_metadata)
@@ -405,8 +407,17 @@ class EventsWorkerStore(SQLBaseStore):
                     desc="_get_event_from_row_rejected_reason",
                 )
 
+            if format_version is None:
+                # This means that we stored the event before we had the concept
+                # of an event format version, so it must be a V1 event.
+                format_version = EventFormatVersions.V1
+
+            # TODO: When we implement new event formats we'll need to use a
+            # different event python type
+            assert format_version == EventFormatVersions.V1
+
             original_ev = FrozenEvent(
-                d,
+                event_dict=d,
                 internal_metadata_dict=internal_metadata,
                 rejected_reason=rejected_reason,
             )
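
Rows written before the new `format_version` column existed come back as NULL, which the read path above maps to V1 before constructing the event. A condensed sketch (assuming `EventFormatVersions` from `synapse.api.constants`):

    def coerce_format_version(format_version):
        # hypothetical helper: NULL means the row predates event format
        # versions, so it can only be a V1 event
        if format_version is None:
            return EventFormatVersions.V1
        return format_version
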
diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py
index 2743b52bad..134297e284 100644
--- a/synapse/storage/pusher.py
+++ b/synapse/storage/pusher.py
@@ -215,7 +215,7 @@ class PusherStore(PusherWorkerStore):
         with self._pushers_id_gen.get_next() as stream_id:
             # no need to lock because `pushers` has a unique key on
             # (app_id, pushkey, user_name) so _simple_upsert will retry
-            newly_inserted = yield self._simple_upsert(
+            yield self._simple_upsert(
                 table="pushers",
                 keyvalues={
                     "app_id": app_id,
@@ -238,7 +238,12 @@ class PusherStore(PusherWorkerStore):
                 lock=False,
             )
 
-            if newly_inserted:
+            user_has_pusher = self.get_if_user_has_pusher.cache.get(
+                (user_id,), None, update_metrics=False
+            )
+
+            if user_has_pusher is not True:
+                # invalidate, since the user might not have had a pusher before
                 yield self.runInteraction(
                     "add_pusher",
                     self._invalidate_cache_and_stream,
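
With native upserts, `_simple_upsert` can no longer cheaply report whether it inserted or updated, so the pusher store now peeks at the cache instead: `update_metrics=False` keeps the peek from skewing hit-rate stats, and the invalidation only runs when the cached answer may have just become wrong. A condensed sketch of the pattern (the invalidation arguments are illustrative, completing the truncated call above):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def maybe_invalidate(store, user_id):
        # peek without touching cache metrics
        cached = store.get_if_user_has_pusher.cache.get(
            (user_id,), None, update_metrics=False
        )
        if cached is not True:
            # the user may have gained their first pusher; drop the
            # (possibly stale) cache entry and stream the invalidation
            yield store.runInteraction(
                "add_pusher",
                store._invalidate_cache_and_stream,
                store.get_if_user_has_pusher,
                (user_id,),
            )
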
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index 0707f9a86a..592c1bcd33 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -588,12 +588,12 @@ class RoomMemberStore(RoomMemberWorkerStore):
             )
 
             # We update the local_invites table only if the event is "current",
-            # i.e., its something that has just happened.
-            # The only current event that can also be an outlier is if its an
-            # invite that has come in across federation.
+            # i.e., it's something that has just happened. If the event is an
+            # outlier, it is only current if it's an "out of band membership",
+            # like a remote invite or a rejection of a remote invite.
             is_new_state = not backfilled and (
                 not event.internal_metadata.is_outlier()
-                or event.internal_metadata.is_invite_from_remote()
+                or event.internal_metadata.is_out_of_band_membership()
             )
             is_mine = self.hs.is_mine_id(event.state_key)
             if is_new_state and is_mine:
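
The rewritten condition reads as a predicate: backfilled events are never current, and an outlier is current only when it is an out-of-band membership (a remote invite, or the rejection of one). As a sketch:

    def is_current_membership(event, backfilled):
        # hypothetical helper restating the condition above
        return not backfilled and (
            not event.internal_metadata.is_outlier()
            or event.internal_metadata.is_out_of_band_membership()
        )
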
diff --git a/synapse/storage/schema/delta/53/event_format_version.sql b/synapse/storage/schema/delta/53/event_format_version.sql
new file mode 100644
index 0000000000..1d977c2834
--- /dev/null
+++ b/synapse/storage/schema/delta/53/event_format_version.sql
@@ -0,0 +1,16 @@
+/* Copyright 2019 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ALTER TABLE event_json ADD COLUMN format_version INTEGER;
diff --git a/synapse/storage/user_directory.py b/synapse/storage/user_directory.py
index a8781b0e5d..ce48212265 100644
--- a/synapse/storage/user_directory.py
+++ b/synapse/storage/user_directory.py
@@ -168,14 +168,14 @@ class UserDirectoryStore(SQLBaseStore):
             if isinstance(self.database_engine, PostgresEngine):
                 # We weight the localpart most highly, then display name and finally
                 # server name
-                if new_entry:
+                if self.database_engine.can_native_upsert:
                     sql = """
                         INSERT INTO user_directory_search(user_id, vector)
                         VALUES (?,
                             setweight(to_tsvector('english', ?), 'A')
                             || setweight(to_tsvector('english', ?), 'D')
                             || setweight(to_tsvector('english', COALESCE(?, '')), 'B')
-                        )
+                        ) ON CONFLICT (user_id) DO UPDATE SET vector=EXCLUDED.vector
                     """
                     txn.execute(
                         sql,
@@ -185,20 +185,45 @@ class UserDirectoryStore(SQLBaseStore):
                         )
                     )
                 else:
-                    sql = """
-                        UPDATE user_directory_search
-                        SET vector = setweight(to_tsvector('english', ?), 'A')
-                            || setweight(to_tsvector('english', ?), 'D')
-                            || setweight(to_tsvector('english', COALESCE(?, '')), 'B')
-                        WHERE user_id = ?
-                    """
-                    txn.execute(
-                        sql,
-                        (
-                            get_localpart_from_id(user_id), get_domain_from_id(user_id),
-                            display_name, user_id,
+                    # TODO: Remove this code after we've bumped the minimum version
+                    # of postgres to always support upserts, so we can get rid of
+                    # `new_entry` usage
+                    if new_entry is True:
+                        sql = """
+                            INSERT INTO user_directory_search(user_id, vector)
+                            VALUES (?,
+                                setweight(to_tsvector('english', ?), 'A')
+                                || setweight(to_tsvector('english', ?), 'D')
+                                || setweight(to_tsvector('english', COALESCE(?, '')), 'B')
+                            )
+                        """
+                        txn.execute(
+                            sql,
+                            (
+                                user_id, get_localpart_from_id(user_id),
+                                get_domain_from_id(user_id), display_name,
+                            )
+                        )
+                    elif new_entry is False:
+                        sql = """
+                            UPDATE user_directory_search
+                            SET vector = setweight(to_tsvector('english', ?), 'A')
+                                || setweight(to_tsvector('english', ?), 'D')
+                                || setweight(to_tsvector('english', COALESCE(?, '')), 'B')
+                            WHERE user_id = ?
+                        """
+                        txn.execute(
+                            sql,
+                            (
+                                get_localpart_from_id(user_id),
+                                get_domain_from_id(user_id),
+                                display_name, user_id,
+                            )
+                        )
+                    else:
+                        raise RuntimeError(
+                            "upsert returned None when 'can_native_upsert' is False"
                         )
-                    )
             elif isinstance(self.database_engine, Sqlite3Engine):
                 value = "%s %s" % (user_id, display_name,) if display_name else user_id
                 self._simple_upsert_txn(
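
On a new-enough PostgreSQL the separate INSERT/UPDATE pair collapses into one statement: `ON CONFLICT (user_id) DO UPDATE SET vector = EXCLUDED.vector` makes the insert idempotent per user. A minimal sketch of the shape with illustrative bindings (the engine rewrites `?` placeholders to `%s`, per the `sql.replace` shown in the PostgresEngine hunk above):

    txn.execute(
        """
        INSERT INTO user_directory_search(user_id, vector)
        VALUES (?, to_tsvector('english', ?))
        ON CONFLICT (user_id) DO UPDATE SET vector = EXCLUDED.vector
        """,
        (user_id, profile_text),   # illustrative bindings
    )
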
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index ec7b2c9672..430bb15f51 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -387,12 +387,14 @@ def timeout_deferred(deferred, timeout, reactor, on_timeout_cancel=None):
     deferred that wraps and times out the given deferred, correctly handling
     the case where the given deferred's canceller throws.
 
+    (See https://twistedmatrix.com/trac/ticket/9534)
+
     NOTE: Unlike `Deferred.addTimeout`, this function returns a new deferred
 
     Args:
         deferred (Deferred)
         timeout (float): Timeout in seconds
-        reactor (twisted.internet.reactor): The twisted reactor to use
+        reactor (twisted.interfaces.IReactorTime): The twisted reactor to use
         on_timeout_cancel (callable): A callable which is called immediately
             after the deferred times out, and not if this deferred is
             otherwise cancelled before the timeout.
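
The docstring now pins the reactor argument to anything implementing `IReactorTime`, which is what makes `timeout_deferred` easy to exercise with a fake clock. A hedged usage sketch, mirroring the new tests further down:

    from twisted.internet import task
    from twisted.internet.defer import Deferred

    from synapse.util.async_helpers import timeout_deferred

    clock = task.Clock()               # any IReactorTime provider will do
    slow_d = Deferred()                # a deferred that never fires
    timed_d = timeout_deferred(slow_d, 10.0, clock)   # note: a *new* deferred

    clock.advance(10.0)                # timed_d errbacks with TimeoutError
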
diff --git a/tests/config/test_generate.py b/tests/config/test_generate.py
index 0c23068bcf..b5ad99348d 100644
--- a/tests/config/test_generate.py
+++ b/tests/config/test_generate.py
@@ -51,7 +51,6 @@ class ConfigGenerationTestCase(unittest.TestCase):
                     "lemurs.win.log.config",
                     "lemurs.win.signing.key",
                     "lemurs.win.tls.crt",
-                    "lemurs.win.tls.dh",
                     "lemurs.win.tls.key",
                 ]
             ),
diff --git a/tests/http/federation/__init__.py b/tests/http/federation/__init__.py
new file mode 100644
index 0000000000..1453d04571
--- /dev/null
+++ b/tests/http/federation/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py
new file mode 100644
index 0000000000..b32d7566a5
--- /dev/null
+++ b/tests/http/federation/test_matrix_federation_agent.py
@@ -0,0 +1,315 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from mock import Mock
+
+import treq
+
+from twisted.internet import defer
+from twisted.internet.protocol import Factory
+from twisted.protocols.tls import TLSMemoryBIOFactory
+from twisted.test.ssl_helpers import ServerTLSContext
+from twisted.web.http import HTTPChannel
+
+from synapse.crypto.context_factory import ClientTLSOptionsFactory
+from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
+from synapse.http.federation.srv_resolver import Server
+from synapse.util.logcontext import LoggingContext
+
+from tests.server import FakeTransport, ThreadedMemoryReactorClock
+from tests.unittest import TestCase
+
+logger = logging.getLogger(__name__)
+
+
+class MatrixFederationAgentTests(TestCase):
+    def setUp(self):
+        self.reactor = ThreadedMemoryReactorClock()
+
+        self.mock_resolver = Mock()
+
+        self.agent = MatrixFederationAgent(
+            reactor=self.reactor,
+            tls_client_options_factory=ClientTLSOptionsFactory(None),
+            _srv_resolver=self.mock_resolver,
+        )
+
+    def _make_connection(self, client_factory, expected_sni):
+        """Builds a test server, and completes the outgoing client connection
+
+        Returns:
+            HTTPChannel: the test server
+        """
+
+        # build the test server
+        server_tls_protocol = _build_test_server()
+
+        # now, tell the client protocol factory to build the client protocol (it will be a
+        # _WrappingProtocol, around a TLSMemoryBIOProtocol, around an
+        # HTTP11ClientProtocol) and wire the output of said protocol up to the server via
+        # a FakeTransport.
+        #
+        # Normally this would be done by the TCP socket code in Twisted, but we are
+        # stubbing that out here.
+        client_protocol = client_factory.buildProtocol(None)
+        client_protocol.makeConnection(FakeTransport(server_tls_protocol, self.reactor))
+
+        # tell the server tls protocol to send its stuff back to the client, too
+        server_tls_protocol.makeConnection(FakeTransport(client_protocol, self.reactor))
+
+        # give the reactor a pump to get the TLS juices flowing.
+        self.reactor.pump((0.1,))
+
+        # check the SNI
+        server_name = server_tls_protocol._tlsConnection.get_servername()
+        self.assertEqual(
+            server_name,
+            expected_sni,
+            "Expected SNI %s but got %s" % (expected_sni, server_name),
+        )
+
+        # fish the test server back out of the server-side TLS protocol.
+        return server_tls_protocol.wrappedProtocol
+
+    @defer.inlineCallbacks
+    def _make_get_request(self, uri):
+        """
+        Sends a simple GET request via the agent, and checks its logcontext management
+        """
+        with LoggingContext("one") as context:
+            fetch_d = self.agent.request(b'GET', uri)
+
+            # Nothing happened yet
+            self.assertNoResult(fetch_d)
+
+            # should have reset logcontext to the sentinel
+            _check_logcontext(LoggingContext.sentinel)
+
+            try:
+                fetch_res = yield fetch_d
+                defer.returnValue(fetch_res)
+            finally:
+                _check_logcontext(context)
+
+    def test_get(self):
+        """
+        happy-path test of a GET request with an explicit port
+        """
+        self.reactor.lookups["testserv"] = "1.2.3.4"
+        test_d = self._make_get_request(b"matrix://testserv:8448/foo/bar")
+
+        # Nothing happened yet
+        self.assertNoResult(test_d)
+
+        # Make sure treq is trying to connect
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 8448)
+
+        # make a test server, and wire up the client
+        http_server = self._make_connection(
+            client_factory,
+            expected_sni=b"testserv",
+        )
+
+        self.assertEqual(len(http_server.requests), 1)
+        request = http_server.requests[0]
+        self.assertEqual(request.method, b'GET')
+        self.assertEqual(request.path, b'/foo/bar')
+        content = request.content.read()
+        self.assertEqual(content, b'')
+
+        # Deferred is still without a result
+        self.assertNoResult(test_d)
+
+        # send the headers
+        request.responseHeaders.setRawHeaders(b'Content-Type', [b'application/json'])
+        request.write(b'')
+
+        self.reactor.pump((0.1,))
+
+        response = self.successResultOf(test_d)
+
+        # that should give us a Response object
+        self.assertEqual(response.code, 200)
+
+        # Send the body
+        request.write('{ "a": 1 }'.encode('ascii'))
+        request.finish()
+
+        self.reactor.pump((0.1,))
+
+        # check it can be read
+        json = self.successResultOf(treq.json_content(response))
+        self.assertEqual(json, {"a": 1})
+
+    def test_get_ip_address(self):
+        """
+        Test the behaviour when the server name contains an explicit IP (with no port)
+        """
+
+        # the SRV lookup will return an empty list (XXX: why do we even do an SRV lookup?)
+        self.mock_resolver.resolve_service.side_effect = lambda _: []
+
+        # then there will be a getaddrinfo on the IP
+        self.reactor.lookups["1.2.3.4"] = "1.2.3.4"
+
+        test_d = self._make_get_request(b"matrix://1.2.3.4/foo/bar")
+
+        # Nothing happened yet
+        self.assertNoResult(test_d)
+
+        self.mock_resolver.resolve_service.assert_called_once_with(
+            b"_matrix._tcp.1.2.3.4",
+        )
+
+        # Make sure treq is trying to connect
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 8448)
+
+        # make a test server, and wire up the client
+        http_server = self._make_connection(
+            client_factory,
+            expected_sni=None,
+        )
+
+        self.assertEqual(len(http_server.requests), 1)
+        request = http_server.requests[0]
+        self.assertEqual(request.method, b'GET')
+        self.assertEqual(request.path, b'/foo/bar')
+
+        # finish the request
+        request.finish()
+        self.reactor.pump((0.1,))
+        self.successResultOf(test_d)
+
+    def test_get_hostname_no_srv(self):
+        """
+        Test the behaviour when the server name has no port, and no SRV record
+        """
+
+        self.mock_resolver.resolve_service.side_effect = lambda _: []
+        self.reactor.lookups["testserv"] = "1.2.3.4"
+
+        test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+
+        # Nothing happened yet
+        self.assertNoResult(test_d)
+
+        self.mock_resolver.resolve_service.assert_called_once_with(
+            b"_matrix._tcp.testserv",
+        )
+
+        # Make sure treq is trying to connect
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 8448)
+
+        # make a test server, and wire up the client
+        http_server = self._make_connection(
+            client_factory,
+            expected_sni=b'testserv',
+        )
+
+        self.assertEqual(len(http_server.requests), 1)
+        request = http_server.requests[0]
+        self.assertEqual(request.method, b'GET')
+        self.assertEqual(request.path, b'/foo/bar')
+
+        # finish the request
+        request.finish()
+        self.reactor.pump((0.1,))
+        self.successResultOf(test_d)
+
+    def test_get_hostname_srv(self):
+        """
+        Test the behaviour when there is a single SRV record
+        """
+        self.mock_resolver.resolve_service.side_effect = lambda _: [
+            Server(host="srvtarget", port=8443)
+        ]
+        self.reactor.lookups["srvtarget"] = "1.2.3.4"
+
+        test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+
+        # Nothing happened yet
+        self.assertNoResult(test_d)
+
+        self.mock_resolver.resolve_service.assert_called_once_with(
+            b"_matrix._tcp.testserv",
+        )
+
+        # Make sure treq is trying to connect
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 8443)
+
+        # make a test server, and wire up the client
+        http_server = self._make_connection(
+            client_factory,
+            expected_sni=b'testserv',
+        )
+
+        self.assertEqual(len(http_server.requests), 1)
+        request = http_server.requests[0]
+        self.assertEqual(request.method, b'GET')
+        self.assertEqual(request.path, b'/foo/bar')
+
+        # finish the request
+        request.finish()
+        self.reactor.pump((0.1,))
+        self.successResultOf(test_d)
+
+
+def _check_logcontext(context):
+    current = LoggingContext.current_context()
+    if current is not context:
+        raise AssertionError(
+            "Expected logcontext %s but was %s" % (context, current),
+        )
+
+
+def _build_test_server():
+    """Construct a test server
+
+    This builds an HTTP channel, wrapped with a TLSMemoryBIOProtocol
+
+    Returns:
+        TLSMemoryBIOProtocol
+    """
+    server_factory = Factory.forProtocol(HTTPChannel)
+    # Request.finish expects the factory to have a 'log' method.
+    server_factory.log = _log_request
+
+    server_tls_factory = TLSMemoryBIOFactory(
+        ServerTLSContext(), isClient=False, wrappedFactory=server_factory,
+    )
+
+    return server_tls_factory.buildProtocol(None)
+
+
+def _log_request(request):
+    """Implements Factory.log, which is expected by Request.finish"""
+    logger.info("Completed request %s", request)
diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py
new file mode 100644
index 0000000000..a872e2441e
--- /dev/null
+++ b/tests/http/federation/test_srv_resolver.py
@@ -0,0 +1,207 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from mock import Mock
+
+from twisted.internet import defer
+from twisted.internet.defer import Deferred
+from twisted.internet.error import ConnectError
+from twisted.names import dns, error
+
+from synapse.http.federation.srv_resolver import SrvResolver
+from synapse.util.logcontext import LoggingContext
+
+from tests import unittest
+from tests.utils import MockClock
+
+
+class SrvResolverTestCase(unittest.TestCase):
+    def test_resolve(self):
+        dns_client_mock = Mock()
+
+        service_name = b"test_service.example.com"
+        host_name = b"example.com"
+
+        answer_srv = dns.RRHeader(
+            type=dns.SRV, payload=dns.Record_SRV(target=host_name)
+        )
+
+        result_deferred = Deferred()
+        dns_client_mock.lookupService.return_value = result_deferred
+
+        cache = {}
+        resolver = SrvResolver(dns_client=dns_client_mock, cache=cache)
+
+        @defer.inlineCallbacks
+        def do_lookup():
+
+            with LoggingContext("one") as ctx:
+                resolve_d = resolver.resolve_service(service_name)
+
+                self.assertNoResult(resolve_d)
+
+                # should have reset to the sentinel context
+                self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
+
+                result = yield resolve_d
+
+                # should have restored our context
+                self.assertIs(LoggingContext.current_context(), ctx)
+
+                defer.returnValue(result)
+
+        test_d = do_lookup()
+        self.assertNoResult(test_d)
+
+        dns_client_mock.lookupService.assert_called_once_with(service_name)
+
+        result_deferred.callback(
+            ([answer_srv], None, None)
+        )
+
+        servers = self.successResultOf(test_d)
+
+        self.assertEquals(len(servers), 1)
+        self.assertEquals(servers, cache[service_name])
+        self.assertEquals(servers[0].host, host_name)
+
+    @defer.inlineCallbacks
+    def test_from_cache_expired_and_dns_fail(self):
+        dns_client_mock = Mock()
+        dns_client_mock.lookupService.return_value = defer.fail(error.DNSServerError())
+
+        service_name = b"test_service.example.com"
+
+        entry = Mock(spec_set=["expires"])
+        entry.expires = 0
+
+        cache = {service_name: [entry]}
+        resolver = SrvResolver(dns_client=dns_client_mock, cache=cache)
+
+        servers = yield resolver.resolve_service(service_name)
+
+        dns_client_mock.lookupService.assert_called_once_with(service_name)
+
+        self.assertEquals(len(servers), 1)
+        self.assertEquals(servers, cache[service_name])
+
+    @defer.inlineCallbacks
+    def test_from_cache(self):
+        clock = MockClock()
+
+        dns_client_mock = Mock(spec_set=['lookupService'])
+        dns_client_mock.lookupService = Mock(spec_set=[])
+
+        service_name = b"test_service.example.com"
+
+        entry = Mock(spec_set=["expires"])
+        entry.expires = 999999999
+
+        cache = {service_name: [entry]}
+        resolver = SrvResolver(
+            dns_client=dns_client_mock, cache=cache, get_time=clock.time,
+        )
+
+        servers = yield resolver.resolve_service(service_name)
+
+        self.assertFalse(dns_client_mock.lookupService.called)
+
+        self.assertEquals(len(servers), 1)
+        self.assertEquals(servers, cache[service_name])
+
+    @defer.inlineCallbacks
+    def test_empty_cache(self):
+        dns_client_mock = Mock()
+
+        dns_client_mock.lookupService.return_value = defer.fail(error.DNSServerError())
+
+        service_name = b"test_service.example.com"
+
+        cache = {}
+        resolver = SrvResolver(dns_client=dns_client_mock, cache=cache)
+
+        with self.assertRaises(error.DNSServerError):
+            yield resolver.resolve_service(service_name)
+
+    @defer.inlineCallbacks
+    def test_name_error(self):
+        dns_client_mock = Mock()
+
+        dns_client_mock.lookupService.return_value = defer.fail(error.DNSNameError())
+
+        service_name = b"test_service.example.com"
+
+        cache = {}
+        resolver = SrvResolver(dns_client=dns_client_mock, cache=cache)
+
+        servers = yield resolver.resolve_service(service_name)
+
+        self.assertEquals(len(servers), 0)
+        self.assertEquals(len(cache), 0)
+
+    def test_disabled_service(self):
+        """
+        Test the behaviour when there is a single record which is ".".
+        """
+        service_name = b"test_service.example.com"
+
+        lookup_deferred = Deferred()
+        dns_client_mock = Mock()
+        dns_client_mock.lookupService.return_value = lookup_deferred
+        cache = {}
+        resolver = SrvResolver(dns_client=dns_client_mock, cache=cache)
+
+        resolve_d = resolver.resolve_service(service_name)
+        self.assertNoResult(resolve_d)
+
+        # returning a single "." should make the lookup fail with a ConnectError
+        lookup_deferred.callback((
+            [dns.RRHeader(type=dns.SRV, payload=dns.Record_SRV(target=b"."))],
+            None,
+            None,
+        ))
+
+        self.failureResultOf(resolve_d, ConnectError)
+
+    def test_non_srv_answer(self):
+        """
+        Test the behaviour when the DNS server gives us a spurious non-SRV response
+        """
+        service_name = b"test_service.example.com"
+
+        lookup_deferred = Deferred()
+        dns_client_mock = Mock()
+        dns_client_mock.lookupService.return_value = lookup_deferred
+        cache = {}
+        resolver = SrvResolver(dns_client=dns_client_mock, cache=cache)
+
+        resolve_d = resolver.resolve_service(service_name)
+        self.assertNoResult(resolve_d)
+
+        lookup_deferred.callback((
+            [
+                dns.RRHeader(type=dns.A, payload=dns.Record_A()),
+                dns.RRHeader(type=dns.SRV, payload=dns.Record_SRV(target=b"host")),
+            ],
+            None,
+            None,
+        ))
+
+        servers = self.successResultOf(resolve_d)
+
+        self.assertEquals(len(servers), 1)
+        self.assertEquals(servers, cache[service_name])
+        self.assertEquals(servers[0].host, b"host")
diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py
index b2e38276d8..d37f8f9981 100644
--- a/tests/http/test_fedclient.py
+++ b/tests/http/test_fedclient.py
@@ -15,8 +15,10 @@
 
 from mock import Mock
 
+from twisted.internet import defer
 from twisted.internet.defer import TimeoutError
 from twisted.internet.error import ConnectingCancelledError, DNSLookupError
+from twisted.test.proto_helpers import StringTransport
 from twisted.web.client import ResponseNeverReceived
 from twisted.web.http import HTTPChannel
 
@@ -25,11 +27,20 @@ from synapse.http.matrixfederationclient import (
     MatrixFederationHttpClient,
     MatrixFederationRequest,
 )
+from synapse.util.logcontext import LoggingContext
 
 from tests.server import FakeTransport
 from tests.unittest import HomeserverTestCase
 
 
+def check_logcontext(context):
+    current = LoggingContext.current_context()
+    if current is not context:
+        raise AssertionError(
+            "Expected logcontext %s but was %s" % (context, current),
+        )
+
+
 class FederationClientTests(HomeserverTestCase):
     def make_homeserver(self, reactor, clock):
 
@@ -42,9 +53,73 @@ class FederationClientTests(HomeserverTestCase):
         self.cl = MatrixFederationHttpClient(self.hs)
         self.reactor.lookups["testserv"] = "1.2.3.4"
 
+    def test_client_get(self):
+        """
+        happy-path test of a GET request
+        """
+        @defer.inlineCallbacks
+        def do_request():
+            with LoggingContext("one") as context:
+                fetch_d = self.cl.get_json("testserv:8008", "foo/bar")
+
+                # Nothing happened yet
+                self.assertNoResult(fetch_d)
+
+                # should have reset logcontext to the sentinel
+                check_logcontext(LoggingContext.sentinel)
+
+                try:
+                    fetch_res = yield fetch_d
+                    defer.returnValue(fetch_res)
+                finally:
+                    check_logcontext(context)
+
+        test_d = do_request()
+
+        self.pump()
+
+        # Nothing happened yet
+        self.assertNoResult(test_d)
+
+        # Make sure treq is trying to connect
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 8008)
+
+        # complete the connection and wire it up to a fake transport
+        protocol = factory.buildProtocol(None)
+        transport = StringTransport()
+        protocol.makeConnection(transport)
+
+        # that should have made it send the request to the transport
+        self.assertRegex(transport.value(), b"^GET /foo/bar")
+
+        # Deferred is still without a result
+        self.assertNoResult(test_d)
+
+        # Send it the HTTP response
+        res_json = '{ "a": 1 }'.encode('ascii')
+        protocol.dataReceived(
+            b"HTTP/1.1 200 OK\r\n"
+            b"Server: Fake\r\n"
+            b"Content-Type: application/json\r\n"
+            b"Content-Length: %i\r\n"
+            b"\r\n"
+            b"%s" % (len(res_json), res_json)
+        )
+
+        self.pump()
+
+        res = self.successResultOf(test_d)
+
+        # check the response is as expected
+        self.assertEqual(res, {"a": 1})
+
     def test_dns_error(self):
         """
-        If the DNS raising returns an error, it will bubble up.
+        If the DNS lookup returns an error, it will bubble up.
         """
         d = self.cl.get_json("testserv2:8008", "foo/bar", timeout=10000)
         self.pump()
@@ -53,6 +128,28 @@ class FederationClientTests(HomeserverTestCase):
         self.assertIsInstance(f.value, RequestSendFailed)
         self.assertIsInstance(f.value.inner_exception, DNSLookupError)
 
+    def test_client_connection_refused(self):
+        d = self.cl.get_json("testserv:8008", "foo/bar", timeout=10000)
+
+        self.pump()
+
+        # Nothing happened yet
+        self.assertNoResult(d)
+
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 8008)
+        e = Exception("go away")
+        factory.clientConnectionFailed(None, e)
+        self.pump(0.5)
+
+        f = self.failureResultOf(d)
+
+        self.assertIsInstance(f.value, RequestSendFailed)
+        self.assertIs(f.value.inner_exception, e)
+
     def test_client_never_connect(self):
         """
         If the HTTP request is not connected and is timed out, it'll give a
@@ -63,7 +160,7 @@ class FederationClientTests(HomeserverTestCase):
         self.pump()
 
         # Nothing happened yet
-        self.assertFalse(d.called)
+        self.assertNoResult(d)
 
         # Make sure treq is trying to connect
         clients = self.reactor.tcpClients
@@ -72,7 +169,7 @@ class FederationClientTests(HomeserverTestCase):
         self.assertEqual(clients[0][1], 8008)
 
         # Deferred is still without a result
-        self.assertFalse(d.called)
+        self.assertNoResult(d)
 
         # Push by enough to time it out
         self.reactor.advance(10.5)
@@ -94,7 +191,7 @@ class FederationClientTests(HomeserverTestCase):
         self.pump()
 
         # Nothing happened yet
-        self.assertFalse(d.called)
+        self.assertNoResult(d)
 
         # Make sure treq is trying to connect
         clients = self.reactor.tcpClients
@@ -107,7 +204,7 @@ class FederationClientTests(HomeserverTestCase):
         client.makeConnection(conn)
 
         # Deferred is still without a result
-        self.assertFalse(d.called)
+        self.assertNoResult(d)
 
         # Push by enough to time it out
         self.reactor.advance(10.5)
@@ -135,7 +232,7 @@ class FederationClientTests(HomeserverTestCase):
         client.makeConnection(conn)
 
         # Deferred does not have a result
-        self.assertFalse(d.called)
+        self.assertNoResult(d)
 
         # Send it the HTTP response
         client.dataReceived(b"HTTP/1.1 200 OK\r\nServer: Fake\r\n\r\n")
@@ -159,7 +256,7 @@ class FederationClientTests(HomeserverTestCase):
         client.makeConnection(conn)
 
         # Deferred does not have a result
-        self.assertFalse(d.called)
+        self.assertNoResult(d)
 
         # Send it the HTTP response
         client.dataReceived(
@@ -195,3 +292,42 @@ class FederationClientTests(HomeserverTestCase):
         request = server.requests[0]
         content = request.content.read()
         self.assertEqual(content, b'{"a":"b"}')
+
+    def test_closes_connection(self):
+        """Check that the client closes unused HTTP connections"""
+        d = self.cl.get_json("testserv:8008", "foo/bar")
+
+        self.pump()
+
+        # there should have been a call to connectTCP
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (_host, _port, factory, _timeout, _bindAddress) = clients[0]
+
+        # complete the connection and wire it up to a fake transport
+        client = factory.buildProtocol(None)
+        conn = StringTransport()
+        client.makeConnection(conn)
+
+        # that should have made it send the request to the connection
+        self.assertRegex(conn.value(), b"^GET /foo/bar")
+
+        # Send the HTTP response
+        client.dataReceived(
+            b"HTTP/1.1 200 OK\r\n"
+            b"Content-Type: application/json\r\n"
+            b"Content-Length: 2\r\n"
+            b"\r\n"
+            b"{}"
+        )
+
+        # We should get a successful response
+        r = self.successResultOf(d)
+        self.assertEqual(r, {})
+
+        self.assertFalse(conn.disconnecting)
+
+        # wait for a while
+        self.pump(120)
+
+        self.assertTrue(conn.disconnecting)
diff --git a/tests/server.py b/tests/server.py
index db43fa0db8..ed2a046ae6 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -1,4 +1,5 @@
 import json
+import logging
 from io import BytesIO
 
 from six import text_type
@@ -22,6 +23,8 @@ from synapse.util import Clock
 
 from tests.utils import setup_test_homeserver as _sth
 
+logger = logging.getLogger(__name__)
+
 
 class TimedOutException(Exception):
     """
@@ -339,7 +342,7 @@ def get_clock():
     return (clock, hs_clock)
 
 
-@attr.s
+@attr.s(cmp=False)
 class FakeTransport(object):
     """
     A twisted.internet.interfaces.ITransport implementation which sends all its data
@@ -414,6 +417,11 @@ class FakeTransport(object):
         self.buffer = self.buffer + byt
 
         def _write():
+            if not self.buffer:
+                # nothing to do. Don't write empty buffers: it upsets the
+                # TLSMemoryBIOProtocol
+                return
+
             if getattr(self.other, "transport") is not None:
                 self.other.dataReceived(self.buffer)
                 self.buffer = b""
@@ -421,7 +429,10 @@ class FakeTransport(object):
 
             self._reactor.callLater(0.0, _write)
 
-        _write()
+        # always actually do the write asynchronously. Some protocols (notably the
+        # TLSMemoryBIOProtocol) get very confused if a read comes back while they are
+        # still doing a write. Doing a callLater here breaks the cycle.
+        self._reactor.callLater(0.0, _write)
 
     def writeSequence(self, seq):
         for x in seq:
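
Taken together, the two hunks make `FakeTransport.write` deliver data strictly asynchronously: buffered bytes only reach the peer via `callLater(0, ...)`, and empty buffers are skipped entirely. Roughly, the combined method now behaves like this sketch:

    def write(self, data):
        self.buffer = self.buffer + data

        def _write():
            if not self.buffer:
                return                      # never feed TLS an empty write
            if getattr(self.other, "transport", None) is not None:
                self.other.dataReceived(self.buffer)
                self.buffer = b""
                return
            self._reactor.callLater(0.0, _write)

        # deliver on a later reactor tick, so a protocol that is mid-write
        # never sees a re-entrant read
        self._reactor.callLater(0.0, _write)
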
diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py
index 829f47d2e8..452d76ddd5 100644
--- a/tests/storage/test_base.py
+++ b/tests/storage/test_base.py
@@ -49,6 +49,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
         self.db_pool.runWithConnection = runWithConnection
 
         config = Mock()
+        config._enable_native_upserts = False
         config.event_cache_size = 1
         config.database_config = {"name": "sqlite3"}
         hs = TestHomeServer(
diff --git a/tests/test_dns.py b/tests/test_dns.py
deleted file mode 100644
index 90bd34be34..0000000000
--- a/tests/test_dns.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from mock import Mock
-
-from twisted.internet import defer
-from twisted.names import dns, error
-
-from synapse.http.endpoint import resolve_service
-
-from tests.utils import MockClock
-
-from . import unittest
-
-
-@unittest.DEBUG
-class DnsTestCase(unittest.TestCase):
-    @defer.inlineCallbacks
-    def test_resolve(self):
-        dns_client_mock = Mock()
-
-        service_name = "test_service.example.com"
-        host_name = "example.com"
-
-        answer_srv = dns.RRHeader(
-            type=dns.SRV, payload=dns.Record_SRV(target=host_name)
-        )
-
-        dns_client_mock.lookupService.return_value = defer.succeed(
-            ([answer_srv], None, None)
-        )
-
-        cache = {}
-
-        servers = yield resolve_service(
-            service_name, dns_client=dns_client_mock, cache=cache
-        )
-
-        dns_client_mock.lookupService.assert_called_once_with(service_name)
-
-        self.assertEquals(len(servers), 1)
-        self.assertEquals(servers, cache[service_name])
-        self.assertEquals(servers[0].host, host_name)
-
-    @defer.inlineCallbacks
-    def test_from_cache_expired_and_dns_fail(self):
-        dns_client_mock = Mock()
-        dns_client_mock.lookupService.return_value = defer.fail(error.DNSServerError())
-
-        service_name = "test_service.example.com"
-
-        entry = Mock(spec_set=["expires"])
-        entry.expires = 0
-
-        cache = {service_name: [entry]}
-
-        servers = yield resolve_service(
-            service_name, dns_client=dns_client_mock, cache=cache
-        )
-
-        dns_client_mock.lookupService.assert_called_once_with(service_name)
-
-        self.assertEquals(len(servers), 1)
-        self.assertEquals(servers, cache[service_name])
-
-    @defer.inlineCallbacks
-    def test_from_cache(self):
-        clock = MockClock()
-
-        dns_client_mock = Mock(spec_set=['lookupService'])
-        dns_client_mock.lookupService = Mock(spec_set=[])
-
-        service_name = "test_service.example.com"
-
-        entry = Mock(spec_set=["expires"])
-        entry.expires = 999999999
-
-        cache = {service_name: [entry]}
-
-        servers = yield resolve_service(
-            service_name, dns_client=dns_client_mock, cache=cache, clock=clock
-        )
-
-        self.assertFalse(dns_client_mock.lookupService.called)
-
-        self.assertEquals(len(servers), 1)
-        self.assertEquals(servers, cache[service_name])
-
-    @defer.inlineCallbacks
-    def test_empty_cache(self):
-        dns_client_mock = Mock()
-
-        dns_client_mock.lookupService.return_value = defer.fail(error.DNSServerError())
-
-        service_name = "test_service.example.com"
-
-        cache = {}
-
-        with self.assertRaises(error.DNSServerError):
-            yield resolve_service(service_name, dns_client=dns_client_mock, cache=cache)
-
-    @defer.inlineCallbacks
-    def test_name_error(self):
-        dns_client_mock = Mock()
-
-        dns_client_mock.lookupService.return_value = defer.fail(error.DNSNameError())
-
-        service_name = "test_service.example.com"
-
-        cache = {}
-
-        servers = yield resolve_service(
-            service_name, dns_client=dns_client_mock, cache=cache
-        )
-
-        self.assertEquals(len(servers), 0)
-        self.assertEquals(len(cache), 0)
diff --git a/tests/test_server.py b/tests/test_server.py
index 634a8fbca5..08fb3fe02f 100644
--- a/tests/test_server.py
+++ b/tests/test_server.py
@@ -19,7 +19,7 @@ from six import StringIO
 
 from twisted.internet.defer import Deferred
 from twisted.python.failure import Failure
-from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock
+from twisted.test.proto_helpers import AccumulatingProtocol
 from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
 
@@ -30,12 +30,18 @@ from synapse.util import Clock
 from synapse.util.logcontext import make_deferred_yieldable
 
 from tests import unittest
-from tests.server import FakeTransport, make_request, render, setup_test_homeserver
+from tests.server import (
+    FakeTransport,
+    ThreadedMemoryReactorClock,
+    make_request,
+    render,
+    setup_test_homeserver,
+)
 
 
 class JsonResourceTests(unittest.TestCase):
     def setUp(self):
-        self.reactor = MemoryReactorClock()
+        self.reactor = ThreadedMemoryReactorClock()
         self.hs_clock = Clock(self.reactor)
         self.homeserver = setup_test_homeserver(
             self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.reactor
diff --git a/tests/unittest.py b/tests/unittest.py
index 78d2f740f9..cda549c783 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -96,7 +96,7 @@ class TestCase(unittest.TestCase):
 
         method = getattr(self, methodName)
 
-        level = getattr(method, "loglevel", getattr(self, "loglevel", logging.ERROR))
+        level = getattr(method, "loglevel", getattr(self, "loglevel", logging.WARNING))
 
         @around(self)
         def setUp(orig):
@@ -333,7 +333,15 @@ class HomeserverTestCase(TestCase):
         """
         kwargs = dict(kwargs)
         kwargs.update(self._hs_args)
-        return setup_test_homeserver(self.addCleanup, *args, **kwargs)
+        hs = setup_test_homeserver(self.addCleanup, *args, **kwargs)
+        stor = hs.get_datastore()
+
+        # Run the database background updates.
+        if hasattr(stor, "do_next_background_update"):
+            while not self.get_success(stor.has_completed_background_updates()):
+                self.get_success(stor.do_next_background_update(1))
+
+        return hs
 
     def pump(self, by=0.0):
         """
diff --git a/tests/util/test_async_utils.py b/tests/util/test_async_utils.py
new file mode 100644
index 0000000000..84dd71e47a
--- /dev/null
+++ b/tests/util/test_async_utils.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from twisted.internet import defer
+from twisted.internet.defer import CancelledError, Deferred
+from twisted.internet.task import Clock
+
+from synapse.util import logcontext
+from synapse.util.async_helpers import timeout_deferred
+from synapse.util.logcontext import LoggingContext
+
+from tests.unittest import TestCase
+
+
+class TimeoutDeferredTest(TestCase):
+    def setUp(self):
+        self.clock = Clock()
+
+    def test_times_out(self):
+        """Basic test case that checks that the original deferred is cancelled and that
+        the timing-out deferred is errbacked
+        """
+        cancelled = [False]
+
+        def canceller(_d):
+            cancelled[0] = True
+
+        non_completing_d = Deferred(canceller)
+        timing_out_d = timeout_deferred(non_completing_d, 1.0, self.clock)
+
+        self.assertNoResult(timing_out_d)
+        self.assertFalse(cancelled[0], "deferred was cancelled prematurely")
+
+        self.clock.pump((1.0, ))
+
+        self.assertTrue(cancelled[0], "deferred was not cancelled by timeout")
+        self.failureResultOf(timing_out_d, defer.TimeoutError, )
+
+    def test_times_out_when_canceller_throws(self):
+        """Test that we have successfully worked around
+        https://twistedmatrix.com/trac/ticket/9534"""
+
+        def canceller(_d):
+            raise Exception("can't cancel this deferred")
+
+        non_completing_d = Deferred(canceller)
+        timing_out_d = timeout_deferred(non_completing_d, 1.0, self.clock)
+
+        self.assertNoResult(timing_out_d)
+
+        self.clock.pump((1.0, ))
+
+        self.failureResultOf(timing_out_d, defer.TimeoutError, )
+
+    def test_logcontext_is_preserved_on_cancellation(self):
+        blocking_was_cancelled = [False]
+
+        @defer.inlineCallbacks
+        def blocking():
+            non_completing_d = Deferred()
+            with logcontext.PreserveLoggingContext():
+                try:
+                    yield non_completing_d
+                except CancelledError:
+                    blocking_was_cancelled[0] = True
+                    raise
+
+        with logcontext.LoggingContext("one") as context_one:
+            # the errbacks should be run in the test logcontext
+            def errback(res, deferred_name):
+                self.assertIs(
+                    LoggingContext.current_context(), context_one,
+                    "errback %s run in unexpected logcontext %s" % (
+                        deferred_name, LoggingContext.current_context(),
+                    )
+                )
+                return res
+
+            original_deferred = blocking()
+            original_deferred.addErrback(errback, "orig")
+            timing_out_d = timeout_deferred(original_deferred, 1.0, self.clock)
+            self.assertNoResult(timing_out_d)
+            self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
+            timing_out_d.addErrback(errback, "timingout")
+
+            self.clock.pump((1.0, ))
+
+            self.assertTrue(
+                blocking_was_cancelled[0],
+                "non-completing deferred was not cancelled",
+            )
+            self.failureResultOf(timing_out_d, defer.TimeoutError, )
+            self.assertIs(LoggingContext.current_context(), context_one)
diff --git a/tests/utils.py b/tests/utils.py
index 08d6faa0a6..df73c539c3 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -154,7 +154,9 @@ def default_config(name):
     config.update_user_directory = False
 
     def is_threepid_reserved(threepid):
-        return ServerConfig.is_threepid_reserved(config, threepid)
+        return ServerConfig.is_threepid_reserved(
+            config.mau_limits_reserved_threepids, threepid
+        )
 
     config.is_threepid_reserved.side_effect = is_threepid_reserved
 
diff --git a/tox.ini b/tox.ini
index a0f5486829..9b2d78ed6d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -149,4 +149,5 @@ deps =
     codecov
 commands =
     coverage combine
+    coverage xml
     codecov -X gcov